pax_global_header00006660000000000000000000000064147005610010014504gustar00rootroot0000000000000052 comment=e256e7016ca01662d20501cadb3cc6cdf4275055 nipy-0.6.1/000077500000000000000000000000001470056100100124675ustar00rootroot00000000000000nipy-0.6.1/.coveragerc000066400000000000000000000002261470056100100146100ustar00rootroot00000000000000[run] branch = True source = nipy include = */nipy/* omit = */nipy/fixes/* */nipy/externals/* */benchmarks/* */bench/* */setup.py nipy-0.6.1/.git-blame-ignore-revs000066400000000000000000000001731470056100100165700ustar00rootroot0000000000000000f00c5929f1f93389c12c786522eccb301c896b 0adde5085031aed3c2807061bd4e88b901910f76 b53c35c7eb3ac78b22c584bec7b56e414690791f nipy-0.6.1/.gitattributes000066400000000000000000000000421470056100100153560ustar00rootroot00000000000000nipy/COMMIT_INFO.txt export-subst nipy-0.6.1/.github/000077500000000000000000000000001470056100100140275ustar00rootroot00000000000000nipy-0.6.1/.github/workflows/000077500000000000000000000000001470056100100160645ustar00rootroot00000000000000nipy-0.6.1/.github/workflows/coverage.yml000066400000000000000000000022701470056100100204030ustar00rootroot00000000000000name: coverage on: push: branches: [main] pull_request: branches: [main] concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: report: runs-on: ubuntu-latest strategy: matrix: python-version: ["3"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} allow-prereleases: true - name: Install run: | pip install -r dev-requirements.txt pip install . - name: Show environment run: env - name: Library tests run: | mkdir tmp cd tmp pytest --doctest-plus --ignore-glob="__config__.py" \ --cov=nipy --cov-report xml --cov-config=../.coveragerc \ --pyargs nipy - name: See what's where run: | pwd ls -lR .. - name: Upload to codecov uses: codecov/codecov-action@v4 with: files: tmp/coverage.xml env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} nipy-0.6.1/.github/workflows/doc-build.yml000066400000000000000000000020221470056100100204450ustar00rootroot00000000000000name: doc-build on: push: branches: [main] pull_request: branches: [main] concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: report: runs-on: ubuntu-latest strategy: matrix: python-version: ["3"] steps: - name: Apt update run: sudo apt update - name: Install graphviz run: | sudo apt install -y graphviz - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} allow-prereleases: true - name: Install run: | pip install -r doc-requirements.txt pip install . 
- name: Show environment run: env - name: Build documentation run: | cd doc make html - name: Run documentation doctests run: | cd doc make clean make doctest nipy-0.6.1/.github/workflows/lint.yml000066400000000000000000000012741470056100100175610ustar00rootroot00000000000000name: style on: [push, pull_request] concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: format: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.12"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install packages run: | pip install --upgrade pip pip install pre-commit pip list - name: Lint run: pre-commit run --all-files --show-diff-on-failure --color always nipy-0.6.1/.github/workflows/test.yml000066400000000000000000000026331470056100100175720ustar00rootroot00000000000000name: Test on: push: branches: - main pull_request: branches: - main permissions: contents: read # to fetch code (actions/checkout) defaults: run: shell: bash concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: test: strategy: matrix: python_version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] os: [ubuntu-latest, windows-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python_version }} allow-prereleases: true - name: Install run: | pip install -r dev-requirements.txt pip install . - name: Show environment run: env - name: Library tests run: | mkdir tmp cd tmp pytest --doctest-plus --ignore-glob="__config__.py" --pyargs nipy bench: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install run: | pip install -r dev-requirements.txt pip install . - name: Show environment run: env - name: Library tests run: | mkdir tmp cd tmp pytest -s -c ../bench.ini --pyargs nipy nipy-0.6.1/.gitignore000066400000000000000000000017541470056100100144660ustar00rootroot00000000000000# Editor temporary/working/backup files # ######################################### .#* [#]*# *~ *$ *.bak *.diff *.org .project *.rej .settings/ .*.sw[nop] .sw[nop] *.tmp *.orig # Not sure what the next two are for *.kpf *-stamp # Compiled source # ################### *.a *.com *.class *.dll *.exe *.o *.py[oc] *.so *.pyd __pycache__/ # Packages # ############ # it's better to unpack these files and commit the raw source # git has its own built in compression methods *.7z *.bz2 *.bzip2 *.dmg *.gz *.iso *.jar *.rar *.tar *.tbz2 *.tgz *.zip # Python files # ################ MANIFEST build/ build-install/ _build dist/ *.egg-info .shelf/ .tox/ .coverage .buildbot.patch # Logs and databases # ###################### *.log *.sql *.sqlite # OS generated files # ###################### .gdb_history .DS_Store? ehthumbs.db Icon? 
Thumbs.db # Things specific to this project # ################################### __config__.py doc/api/generated doc/build/ doc/manual cythonize.dat version_check_tmp/ nipy-0.6.1/.mailmap000066400000000000000000000062161470056100100141150ustar00rootroot00000000000000Alexis Roche Alexis ROCHE Ariel Rokem arokem Ariel Rokem arokem Benjamin Thyreau benjamin.thyreau <> Benjamin Thyreau benji2@decideur.info <> Bertrand Thirion Bertrand THIRION Bertrand Thirion bertrand.thirion <> Bertrand Thirion bthirion Christopher Burns Chris Christopher Burns cburns <> Cindee Madison Cindee Madison Cindee Madison cindee.madison <> Cindee Madison cindeem <> Cindee Madison cindeem Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Eleftherios Garyfallidis Erik Ziegler erikz Fabian Pedregosa Fernando Perez fdo.perez <> Gael Varoquaux Gael varoquaux Gael Varoquaux GaelVaroquaux Gael Varoquaux GaelVaroquaux Gael Varoquaux gvaroquaux Gael Varoquaux varoquau Jarrod Millman Jarrod Millman Jarrod Millman jarrod.millman <> Jean-Baptiste Poline JB Jean-Baptiste Poline jbpoline Joke Durnez jokedurnez Jonathan Taylor jonathan.taylor <> Jonathan Taylor jtaylo Martin Bergtholdt Matthew Brett matthew.brett <> Matthew Brett mb312 Matthieu Brucher Merlin Keller Merlin KELLER Merlin Keller keller Nicholas Tolley <> Nicholas Tolley <55253912+ntolley@users.noreply.github.com> Tom Waite twaite Virgile Fritsch VirgileFritsch Virgile Fritsch Fritsch Matteo Visconti di Oleggio Castello Matteo Visconti dOC Ben Beasley Benjamin A. Beasley # and below the ones to fill out Paris Sprint Account Philippe CIUCIU Thomas VINCENT <20100thomas@gmail.com> alan brian.hawthorne <> davclark <> denis.riviere <> michael.castelle <> mike.trumpis <> sebastien.meriaux <> tim.leslie <> yann.cointepas <> nipy-0.6.1/.pre-commit-config.yaml000066400000000000000000000015011470056100100167450ustar00rootroot00000000000000# pre-commit install repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # v4.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: debug-statements - id: check-ast - id: mixed-line-ending - id: check-yaml args: [--allow-multiple-documents] - id: check-added-large-files - repo: https://github.com/pre-commit/mirrors-prettier rev: fc260393cc4ec09f8fc0a5ba4437f481c8b55dc1 # frozen: v3.0.3 hooks: - id: prettier files: \.(md|rst|toml|yml|yaml) args: [--prose-wrap=preserve] - repo: https://github.com/astral-sh/ruff-pre-commit rev: ef9b09598d53bbcde9cd388ac73a145e67537b44 # frozen: v0.6.6 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] nipy-0.6.1/AUTHOR000066400000000000000000000017301470056100100134150ustar00rootroot00000000000000Alexis Roche Ariel Rokem Ben Beasley Bertrand Thirion Benjamin Thyreau Brian Hawthrorne Ben Cipollini Chris Burns Chris Markiewicz Cindee Madison Elvis Dohmatob Endolith Fabian Pedregosa Fernando Perez Gael Varoquaux Jarrod Millman Jean-Baptiste Poline Jonathan Taylor Matthew Brett Matteo Visconti dOC Merlin Keller Michael Waskom Mike Trumpis Stefan van der Walt Tim Leslie Tom Waite Virgile Fritsch Yannick Schwartz Yaroslav Halchenko nipy-0.6.1/Changelog000066400000000000000000000161571470056100100143130ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim:ft=rst .. 
_changelog: NIPY Changelog -------------- NIPY is not only a module for neuroimaging analysis but an umbrella for other Python neuroimaging-related projects -- see https://github.com/nipy and http://www.nipy.org for more information about their releases. 'Close gh-' statements refer to GitHub issues that are available at:: http://github.com/nipy/nipy/issues The full VCS changelog is available here: http://github.com/nipy/nipy/commits/main Releases ~~~~~~~~ Abbreviated authors are: * MB - Matthew Brett * BT - Bertrand Thirion * AR - Alexis Roche * GV - Gaël Varoquaux * YH - Yarik Halchenko * JBP - Jean-Baptiste Poline * JT - Jonathan Taylor * BB - Ben Beasley * CM - Chris Markiewicz * JM - Jarrod Millman * SvdW - Stéfan van der Walt * 0.6.1 (Saturday 5 October 2024) Compatibility release for Numpy 2.0 * Port code for Numpy 2.0 compatibility (MB) * Update for test precision on Sympy 1.13 (MB) * Clean up consts and casts in C code (BB) * Refactoring to functools.cached_property, style and CI updates (CM) * CI and automated style check updates (Dimitri Papadopoulos Orfanos) * Fix for Viz example (Nicholas Tolley) * Add spin tooling for working with repository checkout (SvdW) * Fix shebangs for some development scripts (Étienne Mollier) * 0.6.0 (Thursday 21 December 2023) Bugfix, refactoring and compatibility release. Much thankless maintenance duty particularly by CM. Oh wait - not thankless - thank you! * Huge cleanup of old dependencies for installation and build (BB). * Allow for Nibabel deprecations and removals, particularly ``get_data`` (BB). * Build refactor to ``pyproject.toml`` (CM) * Various cleanups in spelling and script mechanics (Dimitri Papadopoulos). * Move to pytest / pytest-doctestplus for testing (JM, MB). * Various improvements to development process and CI (JM, MB, SvdW). * Port build process from Numpy distutils to Meson (SvdW). * Drop Python 2 support. * Various bugfixes for modern Numpy (BB, MB). * Drop Cython C files and depend on Cython for build (MB). * Fixes to temporary files in Mayavi calls (fazledyn-or, CM). * 0.5.0 (Saturday 27 March 2021) Bugfix, refactoring and compatibility release. * Heroic work to update Nipy for recent versions of Numpy, Sympy, Nose, Scipy, and numpydoc - many thanks to Matteo Visconti di Oleggio Castello. * Some fixes to harmonize interpolation with recent changes / fixes in interpolation in Scipy (MB). * Move script installation logic to use setuptools (MB). * Some more updates for modern Numpy (MB). * Fixes for changes in Sympy, by updating some formulae to use Piecewise (YH). * 0.4.2 (Saturday 17 February 2018) Bugfix, refactoring and compatibility release. * Fixes for compatibility with released versions of Sympy and Numpy, including some incorrect results from the Euler calculations; * Fixes for deprecated escape sequences in docstrings (thanks to Klaus Sembritzki); * Fixes for compatibility with Windows in various configurations, now tested with Appveyor builds; * Various continuous integration and doc build fixes; * The advent of Windows wheels on release - most credit to the Scipy folks for building Scipy on Windows. * 0.4.1 (Friday 10 February 2017) Bugfix, refactoring and compatibility release.
* New discrete cosine transform functions for building basis sets; * Fixes for compatibility with Python 3.6; * Fixes for compatibility with Numpy 1.12 (1.12 no longer allows floating point values for indexing and other places where an integer value is required); * Fixes for compatibility with Sympy 1.0; * Drop compatibility with Python 2.6, 3.2, 3.3; * Add ability to pass plotting arguments to ``plot_anat`` function (Matteo Visconti dOC); * Some helpers for working with OpenFMRI datasets; * Signal upcoming change in return shape from ``make_recarray`` when passing in an array for values. Allow user to select upcoming behavior with keyword argument; * Bug fix for axis selection when using record arrays in numpies <= 1.7.1; * Add flag to allow SpaceTimeRealign to read TR from image headers; * 0.4.0 (Saturday 18 October 2015) Bugfix, refactoring and compatibility release. * Full port to Python 3 using single code-base; * Various fixes for modern numpy, scipy, sympy, nibabel compatibility; * Refactor space-time realignment (AR); * Change in interface for slice-timing options in space-time realign (AR+MB); * New ``nipy_4d_realign`` script to run space-time realign (Ariel Rokem); * Drop requirement for BLAS / LAPACK external library at build-time (AR); * Move much code out of nipy.labs into main tree (AR, BT); * Deprecate remaining code in nipy.labs (AR, BT); * Updates to landmark learning code including API (BT); * Various fixes to design matrix machinery (BT, Michael Waskom); * Fix to two-sample permutation test suggested by github user jwirsich (BF); * Refactoring and fixes to design matrix drift calculations (JBP); * Extending API of resampling code to allow more ndimage kwargs (JBP); * Start of new example on OpenFMRI ds105 dataset (JT); * New ``block_design`` function for designs with specified onsets (JT); * New ``show_contrast`` function for reviewing contrasts (JT); * Fix for bug in ``nipy_diagnose`` script / ``screens`` module giving incorrect PCA output; * Added SPM HRF to other HRF options; * Redesign concept of an image "space" with new image space modules, functions, classes; * Various fixes for correct installation provided or suggested by YH; * Some visualization changes by Elvis Dohmatob; * 0.3.0 (Saturday 2 February 2013) Bugfix, refactoring and compatibility release. 
* Addition of EM algorithm for mixed effects analysis (BT) * New high-level GLM class interface (BT) * nipy diagnostic scripts save PCA and tsdifana vectors to npz file * Python 3 compatibility through 3.3 (MB) * Fixes for compatibility with upcoming Numpy 1.7 * Fixes to background and axis specification in visualization tools (GV, BT) * Fixes and tests for installed nipy scripts (MB) * Fix to optimization parameters for Realign4D - thanks to `bpinsard` * Fix 0 in affine diagonal for TR=0 in affines by default (MB) * Allow saving of nipy images loaded from nifti files that lack explicit affine (MB) * Allow `slice_order=None` to `FmriRealign4D` when not doing time interpolation (AR); check for valid slice order specification (YH) * Refactoring of quantile routine to move code out of C library (AR) * Fix bug in resampling of unsigned int images (AR) * Custom doctest machinery to work round differences of dtype repr on different platforms, and to skip doctests with optional dependencies (MB) * Script to run examples for testing (MB) * Fix for accidental integer division of frametimes in design matrix generation (Fabian Pedregosa) * Various other fixes and refactorings with thanks from (AR, BT, MB, YH, Yannick Schwartz, Virgile Fritsch) * 0.2.0 (Sunday 22 July 2012) The first ever official release. - > 30 contributors - > 6 years in development - 192 issues closed on github nipy-0.6.1/LICENSE000066400000000000000000000030041470056100100134710ustar00rootroot00000000000000Copyright (c) 2006-2024, NIPY Developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the NIPY Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
nipy-0.6.1/MANIFEST.in000066400000000000000000000015001470056100100142210ustar00rootroot00000000000000include AUTHOR COPYING Makefile* MANIFEST.in setup* README.* THANKS include Changelog TODO include *.py include site.* recursive-include nipy *.c *.h *.pyx *.pxd recursive-include lib *.c *.h *.pyx *.pxd remake recursive-include scripts * recursive-include tools * # put this stuff back into setup.py (package_data) once I'm enlightened # enough to accomplish this herculean task recursive-include nipy/algorithms/tests/data * include nipy/testing/*.nii.gz include nipy/algorithms/diagnostics/tests/data/*.mat include nipy/algorithms/statistics/models/tests/*.bin include nipy/labs/spatial_models/tests/*.nii include nipy/modalities/fmri/tests/*.npz include nipy/modalities/fmri/tests/*.mat include nipy/modalities/fmri/tests/*.txt include nipy/COMMIT_INFO.txt include LICENSE graft examples graft doc global-exclude *~ *.swp *.pyc nipy-0.6.1/Makefile000066400000000000000000000035131470056100100141310ustar00rootroot00000000000000# Automating common tasks for NIPY development PYTHON ?= python HTML_DIR = doc/build/html LATEX_DIR = doc/build/latex WWW_DIR = doc/dist DOCSRC_DIR = doc PROJECT = nipy clean-pyc: find . -regex ".*\.pyc" -exec rm -rf "{}" \; clean: clean-pyc find . -regex ".*\.so" -exec rm -rf "{}" \; find . -regex ".*\.pyd" -exec rm -rf "{}" \; find . -regex ".*~" -exec rm -rf "{}" \; find . -regex ".*#" -exec rm -rf "{}" \; rm -rf build $(MAKE) -C doc clean clean-dev: clean dev distclean: clean -rm MANIFEST -rm $(COVERAGE_REPORT) @find . -name '*.py[co]' \ -o -name '*.a' \ -o -name '*,cover' \ -o -name '.coverage' \ -o -iname '*~' \ -o -iname '*.kcache' \ -o -iname '*.pstats' \ -o -iname '*.prof' \ -o -iname '#*#' | xargs -L10 rm -f -rm -r dist -rm build-stamp -rm -r .tox -git clean -fxd install: $(PYTHON) -m pip install . editable: $(PYTHON) -m pip install --no-build-isolation --editable . # Print out info for possible install methods check-version-info: bash tools/show_version_info.sh source-release: distclean $(PYTHON) -m build . --sdist tox-fresh: # tox tests with fresh-installed virtualenvs. Needs network. And # pytox, obviously. tox -c tox.ini # Website stuff $(WWW_DIR): if [ ! -d $(WWW_DIR) ]; then mkdir -p $(WWW_DIR); fi htmldoc: cd $(DOCSRC_DIR) && $(MAKE) html pdfdoc: cd $(DOCSRC_DIR) && $(MAKE) latex cd $(LATEX_DIR) && $(MAKE) all-pdf html: html-stamp html-stamp: $(WWW_DIR) htmldoc cp -r $(HTML_DIR)/* $(WWW_DIR) touch $@ pdf: pdf-stamp pdf-stamp: $(WWW_DIR) pdfdoc cp $(LATEX_DIR)/*.pdf $(WWW_DIR) touch $@ website: website-stamp website-stamp: $(WWW_DIR) html-stamp pdf-stamp cp -r $(HTML_DIR)/* $(WWW_DIR) touch $@ upload-html: html-stamp ./tools/upload-gh-pages.sh $(WWW_DIR) $(PROJECT) refresh-readme: $(PYTHON) tools/refresh_readme.py nipy .PHONY: orig-src pylint nipy-0.6.1/README.rst000066400000000000000000000052711470056100100141630ustar00rootroot00000000000000.. -*- rest -*- .. vim:syntax=rst .. image:: https://codecov.io/gh/nipy/nipy/branch/main/graph/badge.svg :target: https://app.codecov.io/gh/nipy/nipy/branch/main ==== NIPY ==== Neuroimaging tools for Python. The aim of NIPY is to produce a platform-independent Python environment for the analysis of functional brain imaging data using an open development model. In NIPY we aim to: 1. Provide an open source, mixed language scientific programming environment suitable for rapid development. 2. Create software components in this environment to make it easy to develop tools for MRI, EEG, PET and other modalities. 3. 
Create and maintain a wide base of developers to contribute to this platform. 4. To maintain and develop this framework as a single, easily installable bundle. NIPY is the work of many people. We list the main authors in the file ``AUTHOR`` in the NIPY distribution, and other contributions in ``THANKS``. Website ======= Current information can always be found at the `NIPY project website `_. Mailing Lists ============= For questions on how to use nipy or on making code contributions, please see the ``neuroimaging`` mailing list: https://mail.python.org/mailman/listinfo/neuroimaging Please report bugs at github issues: https://github.com/nipy/nipy/issues You can see the list of current proposed changes at: https://github.com/nipy/nipy/pulls Code ==== You can find our sources and single-click downloads: * `Main repository`_ on Github; * Documentation_ for all releases and current development tree; * Download the `current development version`_ as a tar/zip file; * Downloads of all `available releases`_. .. _main repository: https://github.com/nipy/nipy .. _Documentation: http://nipy.org/nipy .. _current development version: https://github.com/nipy/nipy/archive/main.zip .. _available releases: http://pypi.python.org/pypi/nipy Tests ===== To run nipy's tests, you will need to install the pytest_ Python testing package:: pip install pytest Then:: pytest nipy You can run the doctests along with the other tests with:: pip install pytest-doctestplus Then:: pytest --doctest-plus nipy Installation ============ See the latest `installation instructions`_. License ======= We use the 3-clause BSD license; the full license is in the file ``LICENSE`` in the nipy distribution. .. links: .. _python: http://python.org .. _numpy: http://numpy.org .. _scipy: http://scipy.org .. _sympy: http://sympy.org .. _nibabel: http://nipy.org/nibabel .. _ipython: http://ipython.org .. _matplotlib: http://matplotlib.org .. _pytest: http://pytest.org .. _installation instructions: http://nipy.org/nipy/users/installation.html nipy-0.6.1/THANKS000066400000000000000000000010001470056100100133710ustar00rootroot00000000000000NIPY is an open source project for neuroimaging analysis using Python. It is a community project. Many people have contributed to NIPY, in code development, and they are (mainly) listed in the AUTHOR file. Others have contributed greatly in code review, discussion, and financial support. Below is a partial list. If you've been left off, please let us know (neuroimaging at python.org), and we'll add you. 
Michael Castelle Philippe Ciuciu Dav Clark Yann Cointepas Mark D'Esposito Denis Riviere Karl Young nipy-0.6.1/bench.ini000066400000000000000000000000761470056100100142520ustar00rootroot00000000000000[pytest] python_files = bench_*.py python_functions = bench_* nipy-0.6.1/dev-requirements.txt000066400000000000000000000002171470056100100165270ustar00rootroot00000000000000# Requirements for running tests -r requirements.txt pytest>=7.2 pytest-doctestplus pytest-cov>=4.0 matplotlib coverage pre-commit build twine nipy-0.6.1/doc-requirements.txt000066400000000000000000000002641470056100100165200ustar00rootroot00000000000000# Requirements for building docs # Check these dependencies against doc/conf.py -r dev-requirements.txt sphinx>=7.0 numpydoc>=1.6.0 matplotlib texext ipython # Optional, huge: vtk nipy-0.6.1/doc/000077500000000000000000000000001470056100100132345ustar00rootroot00000000000000nipy-0.6.1/doc/.gitignore000066400000000000000000000000201470056100100152140ustar00rootroot00000000000000labs/generated/ nipy-0.6.1/doc/Makefile000066400000000000000000000074061470056100100147030ustar00rootroot00000000000000# Makefile for Sphinx documentation # PYTHON ?= python DIST_DIR = dist # You can set these variables from the command line. SPHINXOPTS = #-q # suppress all output but warnings SPHINXBUILD = sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean pdf all dist htmlonly api html pickle htmlhelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html make HTML and API documents" @echo " htmlonly make HTML documents only" @echo " api make API documents only" @echo " latex make LaTeX documents (you can set\ PAPER=a4 or PAPER=letter)" @echo " all make HTML, API and PDF documents" @echo " clean remove all generated documents" @echo @echo " linkcheck check all external links for integrity" @echo " doctest run doctests in reST files" @echo " pdf make and run the PDF generation" @echo " dist make and put results in $DIST_DIR/" @echo " gitwash-update update git workflow from source repo" # Commented these out, wasn't clear if we'd use these targets or not. # @echo " pickle to make pickle files (usable by e.g. sphinx-web)" # @echo " htmlhelp to make HTML files and a HTML help project" # @echo " changes to make an overview over all changed/added/deprecated items" clean: -rm -rf build/* $(DIST_DIR)/* *~ api/generated labs/generated -rm -f manual pdf: latex cd build/latex && make all-pdf all: html pdf dist: clean all mkdir -p $(DIST_DIR) ln build/latex/nipy*.pdf $(DIST_DIR) cp -a build/html/* $(DIST_DIR) @echo "Build finished. Final docs are in $(DIST_DIR)" htmlonly: mkdir -p build/html build/doctrees $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html @echo @echo "Build finished. The HTML pages are in build/html." api: $(PYTHON) ../tools/build_modref_templates.py @echo "Build API docs finished." html: api htmlonly -ln -s build manual @echo "Build HTML and API finished." 
gitwash-update: $(PYTHON) ../tools/gitwash_dumper.py devel/guidelines nipy \ --github-user=nipy \ --project-url=http://nipy.org/nipy \ --project-ml-url=https://mail.python.org/mailman/listinfo/neuroimaging pickle: mkdir -p build/pickle build/doctrees $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle @echo @echo "Build finished; now you can process the pickle files or run" @echo " sphinx-web build/pickle" @echo "to start the sphinx-web server." htmlhelp: mkdir -p build/htmlhelp build/doctrees $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in build/htmlhelp." latex: api mkdir -p build/latex build/doctrees $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex # Clear bug for longtable column output in sphinx $(PYTHON) ../tools/fix_longtable.py build/latex/nipy.tex @echo @echo "Build finished; the LaTeX files are in build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: mkdir -p build/changes build/doctrees $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes @echo @echo "The overview file is in build/changes." linkcheck: mkdir -p build/linkcheck build/doctrees $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in build/linkcheck/output.txt." clean-doctest: clean doctest # Clean avoids testing API docs doctest: mkdir -p build/doctest build/doctrees $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest @echo @echo "The overview file is in build/doctest." nipy-0.6.1/doc/README.txt000066400000000000000000000036221470056100100147350ustar00rootroot00000000000000==================== Nipy Documentation ==================== This is the top level build directory for the nipy documentation. All of the documentation is written using Sphinx_, a Python documentation system built on top of reST_. Dependencies ============ In order to build the documentation, you must have: * Sphinx 1.0 or greater * nipy and all its dependencies so that nipy can import * matplotlib * latex (for the PNG mathematics graphics) * graphviz (for the inheritance diagrams) For the Python dependencies, do:: pip install -r ../doc-requirements.txt Files and directories ===================== This directory contains: * Makefile - the build script to build the HTML or PDF docs. Type ``make help`` for a list of options. * users - the user documentation. * devel - documentation for developers. * faq - frequently asked questions * api - placeholders to automatically generate the api documentation * www - source files for website-only reST documents which should not go in the generated PDF documentation. * links_names.txt - reST document with hyperlink targets for common links used throughout the documentation * .rst files - some top-level documentation source files * conf.py - the sphinx configuration. * sphinxext - some extensions to sphinx to handle math, ipython syntax highlighting, numpy_ docstring parsing, and autodocs. * _static - used by the sphinx build system. * _templates - used by the sphinx build system. Building the documentation -------------------------- Instructions for building the documentation are in the file: ``devel/guidelines/howto_document.rst`` .. Since this README.txt is not processed by Sphinx during the .. documentation build, I've included the links directly so it is at .. least a valid reST doc. .. _Sphinx: http://sphinx.pocoo.org/ ..
_reST: http://docutils.sourceforge.net/rst.html .. _numpy: http://www.scipy.org/NumPy .. vim: ft=rst nipy-0.6.1/doc/_static/000077500000000000000000000000001470056100100146625ustar00rootroot00000000000000nipy-0.6.1/doc/_static/nipy.css000066400000000000000000000177521470056100100163670ustar00rootroot00000000000000/** * Alternate Sphinx design * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; /*background-color: #AFC1C4; */ background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } td.linenos pre { padding: 0.5em 0; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } hr { border: 1px solid #abc; margin: 2em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; border: 0; } tt.descclassname { background-color: transparent; border: 0; } tt.xref { background-color: transparent; font-weight: bold; border: 0; } a tt { background-color: transparent; font-weight: bold; border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } dt:target, .highlight { background-color: #fbe54e; } dl.class, dl.function { border-top: 2px solid #888; } dl.method, dl.attribute { border-top: 1px solid #aaa; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } pre { line-height: 120%; } pre a { color: inherit; text-decoration: underline; } .first { margin-top: 0 !important; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } /* div.documentwrapper { width: 100%; } */ div.clearer { clear: both; } div.related h3 { display: none; } div.related ul { background-image: url(navigation.png); height: 2em; list-style: none; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 0; padding-left: 10px; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body a { text-decoration: underline; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; text-align: left; /* margin-left: -100%; */ } div.sphinxsidebar h4, div.sphinxsidebar h3 { margin: 1em 0 0.5em 0; font-size: 0.9em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar 
ul { padding-left: 1.5em; margin-top: 7px; list-style: none; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { list-style: square; margin-left: 20px; } p { margin: 0.8em 0 0.5em 0; } p.rubric { font-weight: bold; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } div.pagination { margin-top: 2em; padding-top: 0.5em; border-top: 1px solid black; text-align: center; } div.sphinxsidebar ul.toc { margin: 1em 0 1em 0; padding: 0 0 0 0.5em; list-style: none; } div.sphinxsidebar ul.toc li { margin: 0.5em 0 0.5em 0; font-size: 0.9em; line-height: 130%; } div.sphinxsidebar ul.toc li p { margin: 0; padding: 0; } div.sphinxsidebar ul.toc ul { margin: 0.2em 0 0.2em 0; padding: 0 0 0 1.8em; } div.sphinxsidebar ul.toc ul li { padding: 0; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 0 0; border: 1px solid #86989B; background-color: #f7f7f7; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #ccc; color: white!important; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } img.inheritance { border: 0px } form.pfform { margin: 10px 0 20px 0; } table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: 
italic; padding-top: 5px; font-size: 90%; } ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } nipy-0.6.1/doc/_static/reggie2.png [binary PNG image data omitted] nipy-0.6.1/doc/_templates/000077500000000000000000000000001470056100100153715ustar00rootroot00000000000000nipy-0.6.1/doc/_templates/layout.html000066400000000000000000000056161470056100100176020ustar00rootroot00000000000000{% extends "!layout.html" %} {% set title = 'Neuroimaging in Python' %} {% block rootrellink %}
  • NIPY home
  • {% endblock %} {% block extrahead %} {% endblock %} {% block header %} {% endblock %} {# This block gets put at the top of the sidebar #} {% block sidebarlogo %}

    Site Navigation

    NIPY Community

    Github repo

    {% endblock %} {# I had to copy the whole search block just to change the rendered text, so it doesn't mention modules or classes #} {%- block sidebarsearch %} {%- if pagename != "search" %} {%- endif %} {# The sidebarsearch block is the last one available in the default sidebar() macro, so the only way to add something to the bottom of the sidebar is to put it here, at the end of the sidebarsearch block (before it closes). #} {%- endblock %} nipy-0.6.1/doc/api/000077500000000000000000000000001470056100100140055ustar00rootroot00000000000000nipy-0.6.1/doc/api/index.rst000066400000000000000000000001731470056100100156470ustar00rootroot00000000000000.. _api-index: ##### API ##### .. only:: html :Release: |version| :Date: |today| .. include:: generated/gen.rst nipy-0.6.1/doc/bibtex/000077500000000000000000000000001470056100100145115ustar00rootroot00000000000000nipy-0.6.1/doc/bibtex/README.txt000066400000000000000000000016301470056100100162070ustar00rootroot00000000000000.. Using -*- rst -*- (ReST) mode for emacs editing .. We don't expect this file to appear in the output documentation =============== Bibtex folder =============== This folder is for bibtex bibliographies, for citations in NIPY documentation. At the moment there is no standard bibtex mechanism in sphinx_, but we keep be the bibs here, waiting for the time that this is done. They also provide the sources for script conversion to ReST_. For script conversion, we have used: http://code.google.com/p/bibstuff/ For example, let's say in your ReST_ page ``example.rst`` you have something like this:: I here cite the VTK book [VTK4]_ and you've got a bibtex entry starting ``@book{VTK4,`` in a file ``vtk.bib``, then you could run this command:: bib4txt.py -i example.rst vtk.bib which would output, to the terminal, the ReST_ text you could add to the bottom of ``example.rst`` to create the reference. nipy-0.6.1/doc/bibtex/vtk.bib000066400000000000000000000003561470056100100157770ustar00rootroot00000000000000@book{VTK4, author={Will Schroeder and Ken Martin and Bill Lorensen}, title={{The Visualization Toolkit--An Object-Oriented Approach To 3D Graphics}}, publisher={Kitware, Inc.}, edition={Fourth}, year={2006} } nipy-0.6.1/doc/conf.py000066400000000000000000000162601470056100100145400ustar00rootroot00000000000000# vi: set ft=python sts=4 ts=4 sw=4 et: # # sampledoc documentation build configuration file, created by # sphinx-quickstart on Tue Jun 3 12:40:24 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import os import sys from importlib import import_module import sphinx import sphinx.ext.doctest # Doc generation depends on being able to import project project = 'nipy' try: project_module = import_module(project) except ImportError: raise RuntimeError(f'Cannot import {project}, please investigate') # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.append(os.path.abspath('sphinxext')) # General configuration # --------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'texext.mathcode', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', 'texext.math_dollar', 'numpydoc', 'sphinx.ext.inheritance_diagram', 'matplotlib.sphinxext.plot_directive', 'IPython.sphinxext.ipython_console_highlighting', ] # Autosummary on autosummary_generate=True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # copyright = ':ref:`2005-2018, Neuroimaging in Python team. # `' copyright = '2005-2024, Neuroimaging in Python team' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = project_module.__version__ # The full version, including alpha/beta/rc tags. release = version # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directories, that shouldn't # be searched for source files. # exclude_trees = [] # what to put into API doc (just class doc, just init, or both) autoclass_content = 'class' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'nipy.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'NIPY Documentation' # The name of an image file (within the static path) to place at the top of # the sidebar. #html_logo = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Content template for the index page. html_index = 'index.html' # Custom sidebar templates, maps document names to template names. # html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. 
#html_use_modindex = True # If true, the reST sources are included in the HTML build as _sources/. html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = project # Options for LaTeX output # ------------------------ # Additional stuff for the LaTeX preamble. _latex_preamble = r""" \usepackage{amsmath} \usepackage{amssymb} % Uncomment these two if needed %\usepackage{amsfonts} %\usepackage{txfonts} """ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', 'preamble': _latex_preamble, } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class # [howto/manual]). latex_documents = [ ('documentation', 'nipy.tex', 'Neuroimaging in Python Documentation', 'Neuroimaging in Python team.','manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None if sphinx.version_info[:2] < (1, 4): # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = True else: # Sphinx >= 1.4 latex_toplevel_sectioning = 'part' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_use_modindex = True # Doctesting helpers doctest_global_setup = """\ import numpy as np from numpy import array try: import vtk except ImportError: vtk = None """ _sedd = sphinx.ext.doctest.doctest doctest_default_flags = (_sedd.ELLIPSIS | _sedd.IGNORE_EXCEPTION_DETAIL | _sedd.DONT_ACCEPT_TRUE_FOR_1 | _sedd.NORMALIZE_WHITESPACE) # Numpy extensions # ---------------- # Worked out by Steven Silvester in # https://github.com/scikit-image/scikit-image/pull/1356 numpydoc_show_class_members = False numpydoc_class_members_toctree = False nipy-0.6.1/doc/devel/000077500000000000000000000000001470056100100143335ustar00rootroot00000000000000nipy-0.6.1/doc/devel/code_discussions/000077500000000000000000000000001470056100100176735ustar00rootroot00000000000000nipy-0.6.1/doc/devel/code_discussions/brainvisa_repositories.rst000066400000000000000000000211201470056100100252100ustar00rootroot00000000000000.. _brainvisa_repositories: Can NIPY get something interesting from BrainVISA databases? ============================================================ I wrote this document to try to give more information to the NIPY developers about the present and future of the :term:`BrainVISA` database system. I hope it will serve the discussion opened by Jarrod Millman about a possible collaboration between the two projects on this topic. Unfortunately, I do not know of other projects providing similar features (such as BIRN), so I will only focus on BrainVISA. Yann Cointepas 2006-11-21 Introduction ------------ In BrainVISA, the whole database system is home-made and written in Python. This system is based on the file system and allows requests for both reading and writing (getting the names of files that do not exist yet).
We will change this in the future by defining an API (such as the one introduced below) and by providing at least two implementations, one relying on a relational database system and one compatible with the current database system. Having one single API will make it possible, for instance, to work on huge databases located on servers and on smaller databases located in a laptop directory (with some synchronization features). This system will be independent of the BrainVISA application; it could be packaged separately. Unfortunately, we cannot say when this work will be done (our developments are slowed because our whole lab will move to a new institute in January 2007). Here is a summary describing the current BrainVISA database system and some thoughts on what it may become. What is a database in BrainVISA today? -------------------------------------- A directory is a BrainVISA database if the structure of its sub-directories and the file names in this directory respect a set of rules. These rules make it possible for BrainVISA to scan the whole directory contents and to identify the database elements without ambiguity. These elements are composed of the following information: * *Data type:* identifies the contents of a data item (image, mesh, functional image, anatomical MRI, etc.). The data types are organized in a hierarchy, making it possible to refine a generic type into several specialized types. For example, there is a 4D Image type which is specialized into 3D Image. 3D Image is itself refined into several types, among which T1 MRI and Brain mask. * *File format:* represents the format of the files used to record a data item. BrainVISA is able to recognize several file formats (for example DICOM, Analyze/SPM, GIS, etc.). It is easy to add new data formats and to provide converters to make it possible for existing processes to use these new formats. * *Files:* contains the names of the files (and/or directories) used to record the data. * *Attributes:* an attribute is an association between a name and a value. A set of attributes is associated with each element of a BrainVISA database. This set represents all of the characteristics of a data item (such as the image size, the name of the protocol corresponding to the data, or the acquisition parameters). Attribute values are set by BrainVISA during directory scanning (typically protocol, group, subject, etc.). It is possible to completely define the set of rules used to convert a directory into a BrainVISA database. That allows the use of BrainVISA without having to modify an existing file organization. However, writing such a system of rules requires very good knowledge of BrainVISA. This is why BrainVISA is provided with a default data organization system that can be used easily. A database can be used for deciding where to write data. The set of rules is used to generate the appropriate file name according to the data type, file format and attributes. This is a key feature that greatly helps the users and allows automation. It is not mandatory to use a database to process data with BrainVISA. However, some important features are not available when you are using data which are not in a database. For example, the BrainVISA ability to construct a default output file name when an input data item is selected in a process relies on the database system. Moreover, some processes use the database system to find data; for example, the brain mask viewer tries to find the T1 MRI used to build the brain mask in order to superimpose both images in an Anatomist window.
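To make these four pieces of information concrete, here is a small sketch - hypothetical Python for illustration only, not BrainVISA's actual API; the class, the naming rule and the extension table are all invented::

    import os

    class DataElement:
        """One database element: data type, file format, files and attributes."""
        def __init__(self, data_type, file_format, files, attributes):
            self.data_type = data_type      # e.g. 'T1 MRI', a leaf of the type hierarchy
            self.file_format = file_format  # e.g. 'NIFTI', 'DICOM', 'GIS'
            self.files = files              # names of files / directories recording the data
            self.attributes = attributes    # e.g. {'protocol': ..., 'subject': ...}

    # An invented naming rule, in the spirit of the default organization:
    # build a default file name from the attributes and the file format.
    EXTENSIONS = {'NIFTI': '.nii', 'GIS': '.ima'}

    def default_location(element):
        a = element.attributes
        return os.path.join(a['protocol'], a['subject'], 'anatomy',
                            a['subject'] + EXTENSIONS[element.file_format])

    elem = DataElement('T1 MRI', 'NIFTI', [],
                       {'protocol': 'my_protocol', 'subject': 'subject01'})
    print(default_location(elem))  # my_protocol/subject01/anatomy/subject01.nii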
A few thoughts about a possible API for repositories ---------------------------------------------------- I think the most important point for data repositories is to define a user API. This API should be independent of data storage and of data organization. Data organization is important because it is very difficult to find a single organization that covers the needs of all users in the long term. In this API, each data item should have a unique identifier (let's call it a URL). The rest of the API could be divided into two parts: #. An indexation system managing data organization. It defines properties attached to data items (for instance, "group" or "subject" can be seen as properties of an FMRI image) as well as possible user requests on the data. This indexation API could have several implementations (relational database, BIRN, BrainVISA, etc.). #. A data storage system managing the link between the URL of a data item and its representation on a local file system. This system should take into account various file formats and various file storage systems (e.g. on a local file system, on a distant ftp site, as byte blocks in a relational database). This separation between indexation and storage is important for the design of databases; it makes it possible, for instance, to use distant or local data storage, or to define several indexations (i.e. several data organizations) for the same data. However, indexation and data storage are not always independent. For example, they are independent if we use a relational database for indexation and URLs for storage, but they are not if file or directory names give indexation information (like in the BrainVISA databases described above). At the user level, things can be simpler because the separation can be hidden in one object: the repository. A repository is composed of one indexation system and one data storage system and manages all the links between them. The user can send requests to the repository and receive a set of data items. Each data item contains indexation information (via the indexation system) and gives access to the data (via the storage system). Here is a sample of what user code could be, to illustrate what I have in mind, followed by a few comments: :: # Get an access to one repository repository = openRepository( repositoryURL ) # Create a request for selection of all the FMRI in the repository request = 'SELECT * FROM FMRI' # Iterate on data items in the repository for item in repository.select( request ): print(item.url) # Item is a directory-like structure for properties access for property in item: print(property, '=', item[ property ]) # Retrieve the file(s) (and directory(ies) if any) from the data storage system # and convert them to NIFTI format (if necessary). files = item.getLocalFiles( format='NIFTI' ) niftiFileName = files[ 0 ] # Read the image and do something with it ... #. I do not yet have a good idea of how to represent requests. Here, I chose to use SQL since it is simple to understand. #. This code does not make any assumption on the properties that are associated with an FMRI image. #. The method getLocalFiles can do nothing more than return a file name if the data item corresponds to a local file in NIFTI format. But the same code can be used to access a DICOM image located on a distant ftp server. In this case, getLocalFiles will manage the transfer of the DICOM file, then the conversion to the required NIFTI format, and return the name(s) of temporary file(s). #.
nipy-0.6.1/doc/devel/code_discussions/comparisons/index.rst

.. _comparisons:

=================
 Software Design
=================

.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   vtk_datasets

nipy-0.6.1/doc/devel/code_discussions/comparisons/vtk_datasets.rst

==============
 VTK datasets
==============

Here we describe the VTK dataset model, because of some parallels with our own idea of an image object. The document is from the VTK book - [VTK4]_

See also:

* http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/data.html#vtk-data-structures
* http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/auto/example_datasets.html
* http://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python
* http://www.vtk.org/VTK/img/file-formats.pdf
* https://svn.enthought.com/enthought/attachment/wiki/MayaVi/tvtk_datasets.pdf?format=raw
* http://public.kitware.com/cgi-bin/viewcvs.cgi/*checkout*/Examples/DataManipulation/Python/BuildUGrid.py?root=VTK&content-type=text/plain

What is a VTK dataset?
======================

VTK datasets represent discrete spatial data.

Datasets consist of two components:

* *organizing structure* - the topology and geometry
* *data attributes* - data that can be attached to the topology / geometry above.

Structure: topology / geometry
------------------------------

The structure part of a dataset is the part that gives the position and connection of points in 3D space.

Let us first import *vtk* for our code examples.

.. doctest::
    :skipif: vtk is None

    >>> import vtk

An *id* is an index into a given vector
---------------------------------------

We introduce *id* to explain the code below. An id is simply an index into a vector, and is therefore an integer. Of course the id identifies the element in the vector; as long as you know which vector the id refers to, you can identify the element.

.. doctest::
    :skipif: vtk is None

    >>> pts = vtk.vtkPoints()
    >>> id = pts.InsertNextPoint(0, 0, 0)
    >>> id == 0
    True
    >>> id = pts.InsertNextPoint(0, 1, 0)
    >>> id == 1
    True
    >>> pts.GetPoint(1) == (0.0, 1.0, 0.0)
    True

A dataset has one or more points
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Points have coordinates in 3 dimensions, in the order ``x``, ``y``, ``z`` - see http://www.vtk.org/doc/release/5.4/html/a00374.html - ``GetPoint()``

.. doctest::
    :skipif: vtk is None

    >>> pts = vtk.vtkPoints()
    >>> pts.InsertNextPoint(0, 0) # needs 3 coordinates
    Traceback (most recent call last):
       ...
    TypeError: function takes exactly 3 arguments (2 given)
    >>> _ = pts.InsertNextPoint(0, 0, 0) # returns point index in point array
    >>> pts.GetPoint(0)
    (0.0, 0.0, 0.0)
    >>> _ = pts.InsertNextPoint(0, 1, 0)
    >>> _ = pts.InsertNextPoint(0, 0, 1)

A dataset has one or more cells
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A cell is a local specification of the connection between points - an atom of topology in VTK. A cell has a type, and a list of point ids. The cell type determines (by convention) what the connectivity of the list of points should be. For example, we can make a cell of type ``vtkTriangle``. The first point starts the triangle, the next point is the next point in the triangle counterclockwise, connected to the first and third, and the third is the remaining point, connected to the first and second.

.. doctest::
    :skipif: vtk is None

    >>> VTK_TRIANGLE = 5 # A VTK constant identifying the triangle type
    >>> triangle = vtk.vtkTriangle()
    >>> isinstance(triangle, vtk.vtkCell)
    True
    >>> triangle.GetCellType() == VTK_TRIANGLE
    True
    >>> pt_ids = triangle.GetPointIds() # these are default (zeros) at the moment
    >>> [pt_ids.GetId(i) for i in range(pt_ids.GetNumberOfIds())] == [0, 0, 0]
    True

Here we set the ids. The ids refer to the points above. The system does not know this yet, but it will because, later, we are going to associate this cell with the points, in a dataset object.

.. doctest::
    :skipif: vtk is None

    >>> for i in range(pt_ids.GetNumberOfIds()): pt_ids.SetId(i, i)

Associating points and cells
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We make the most general possible VTK dataset - the unstructured grid.

.. doctest::
    :skipif: vtk is None

    >>> ugrid = vtk.vtkUnstructuredGrid()
    >>> ugrid.Allocate(1, 1)
    >>> ugrid.SetPoints(pts)
    >>> id = ugrid.InsertNextCell(VTK_TRIANGLE, pt_ids)

Data attributes
---------------

So far we have specified a triangle, with 3 points, but no associated data.

You can associate data with cells, or with points, or both. Point data associates values (e.g. scalars) with the points in the dataset. Cell data associates values (e.g. scalars) with the cells - in this case one (e.g.) scalar value with the whole triangle.

.. doctest::
    :skipif: vtk is None

    >>> pt_data = ugrid.GetPointData()
    >>> cell_data = ugrid.GetCellData()

There are many data attributes that can be set, including scalars, vectors, normals (normalized vectors), texture coordinates and tensors, using (respectively) ``{pt|cell}_data.{Get|Set}{Scalars|Vectors|Normals|TCoords|Tensors}``. For example:

.. doctest::
    :skipif: vtk is None

    >>> pt_data.GetScalars() is None
    True

But we can set the scalar (or other) data:

.. doctest::
    :skipif: vtk is None

    >>> tri_pt_data = vtk.vtkFloatArray()
    >>> for i in range(3): _ = tri_pt_data.InsertNextValue(i)
    >>> _ = pt_data.SetScalars(tri_pt_data)

To the cells as well, or instead, if we want. Don't forget there is only one cell.

.. doctest::
    :skipif: vtk is None

    >>> tri_cell_data = vtk.vtkFloatArray()
    >>> _ = tri_cell_data.InsertNextValue(3)
    >>> _ = cell_data.SetScalars(tri_cell_data)

You can set different types of data into the same dataset:
.. doctest::
    :skipif: vtk is None

    >>> tri_pt_vecs = vtk.vtkFloatArray()
    >>> tri_pt_vecs.SetNumberOfComponents(3)
    >>> tri_pt_vecs.InsertNextTuple3(1, 1, 1)
    >>> tri_pt_vecs.InsertNextTuple3(2, 2, 2)
    >>> tri_pt_vecs.InsertNextTuple3(3, 3, 3)
    >>> _ = pt_data.SetVectors(tri_pt_vecs)

If you want to look at what you have, run this code

::

    # ..testcode:: when live
    # make a dataset mapper and actor for our unstructured grid
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInput(ugrid)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # Create the usual rendering stuff.
    ren = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # add the actor
    ren.AddActor(actor)
    # Render the scene and start interaction.
    iren.Initialize()
    renWin.Render()
    iren.Start()

.. [VTK4] Schroeder, Will, Ken Martin, and Bill Lorensen. (2006) *The Visualization Toolkit--An Object-Oriented Approach To 3D Graphics*. Kitware, Inc.

nipy-0.6.1/doc/devel/code_discussions/coordmap_notes.rst

.. _coordmap-discussion:

########################################
Some discussion notes on coordinate maps
########################################

These notes contain some email discussion between Jonathan Taylor, Bertrand Thirion and Gael Varoquaux about coordinate maps, coordinate systems and transforms. They are a little bit rough and undigested in their current form, but they might be useful for background.

The code and discussion below mention ideas like ``LPIImage``, ``XYZImage`` and ``AffineImage``. These were image classes that constrained their coordinate maps to have input and output axes in a particular order. We eventually removed these in favor of automated reordering of image axes on save, and explicit reordering of images that needed known axis ordering.

..
some working notes :: import sympy i, j, k = sympy.symbols('i, j, k') np.dot(np.array([[0,0,1],[1,0,0],[0,1,0]]), np.array([i,j,k])) kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) ijk_to_kij([i,j,k]) kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) ijk_to_kij([i,j,k]) kij_to_RAS = compose(ijk_to_kij, ijk_to_RAS) kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij) kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij.inverse()) kij_to_RAS kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) # Check that it does the right permutation ijk_to_kij([i,j,k]) # Yup, now let's try to make a kij_to_RAS transform # At first guess, we might try kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij) # but we have a problem, we've asked for a composition that doesn't make sense kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij.inverse()) kij_to_RAS # check that things are working -- I should get the same value at i=20,j=30,k=40 for both mappings, only the arguments are reversed ijk_to_RAS([i,j,k]) kij_to_RAS([k,i,j]) another_kij_to_RAS = ijk_to_RAS.reordered_domain('kij') another_kij_to_RAS([k,i,j]) # rather than finding the permutation matrix your self another_kij_to_RAS = ijk_to_RAS.reordered_domain('kij') another_kij_to_RAS([k,i,j]) >>> ijk = CoordinateSystem('ijk', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> xyz = CoordinateSystem('xyz', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> x_start, y_start, z_start = [sympy.Symbol(s) for s in ['x_start', 'y_start', 'z_start']] >>> x_step, y_step, z_step = [sympy.Symbol(s) for s in ['x_step', 'y_step', 'z_step']] >>> i, j, k = [sympy.Symbol(s) for s in 'ijk'] >>> T = np.array([[x_step,0,0,x_start],[0,y_step,0,y_start],[0,0,z_step,z_start],[0,0,0,1]]) >>> T array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) >>> A = AffineTransform(ijk, xyz, T) >>> A AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) ) >>> A([i,j,k]) array([x_start + i*x_step, y_start + j*y_step, z_start + k*z_step], dtype=object) >>> # this is another >>> A_kij = A.reordered_domain('kij') >>> A_kij AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[0, x_step, 0, x_start], [0, 0, y_step, y_start], [z_step, 0, 0, z_start], [0.0, 0.0, 0.0, 1.0]], dtype=object) ) >>> >>> A_kij([k,i,j]) array([x_start + i*x_step, y_start + j*y_step, z_start + k*z_step], dtype=object) >>> # let's look at another reordering >>> A_kij_yzx = A_kij.reordered_range('yzx') >>> A_kij_yzx AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('y', 'z', 'x'), name='', coord_dtype=object), affine=array([[0, 0, y_step, y_start], [z_step, 0, 0, z_start], [0, x_step, 0, x_start], [0, 0, 0, 1.00000000000000]], dtype=object) ) >>> A_kij_yzx([k,i,j]) array([y_start + j*y_step, z_start + k*z_step, x_start + i*x_step], dtype=object) >>> class 
RASTransform(AffineTransform):
       """
       An AffineTransform with output, i.e. range:

       x: units of 1mm increasing from Right to Left
       y: units of 1mm increasing from Anterior to Posterior
       z: units of 1mm increasing from Superior to Inferior
       """
       def reorder_range(self):
           raise ValueError('not allowed to reorder the "xyz" output coordinates')

       def to_LPS(self):
           from copy import copy
           return AffineTransform(copy(self.function_domain),
                                  copy(self.function_range),
                                  np.dot(np.diag([-1,-1,1,1]), self.affine))

    class LPSTransform(AffineTransform):
       """
       An AffineTransform with output, i.e. range:

       x: units of 1mm increasing from Left to Right
       y: units of 1mm increasing from Posterior to Anterior
       z: units of 1mm increasing from Inferior to Superior
       """
       def reorder_range(self):
           raise ValueError('not allowed to reorder the "xyz" output coordinates')

       def to_RAS(self):
           from copy import copy
           return AffineTransform(copy(self.function_domain),
                                  copy(self.function_range),
                                  np.dot(np.diag([-1,-1,1,1]), self.affine))

    class NeuroImage(Image):
       def __init__(self, data, affine, axis_names, world='world-RAS'):
           affine_transform = {'LPS': LPSTransform,
                               'RAS': RASTransform}[world](axis_names[:3], "xyz", affine)
           ...

LPIImage only forced it to be of one type.

Email #1
--------

Excuse the long email but I started writing, and then it started looking like documentation. I will put most of it into doc/users/coordinate_map.rst.

Also, I am not sure what this means. The image is in LPI ordering, only if the reference frame of the world space it is pointing to is.

I am proposing we enforce the world space to have this frame of reference to be explicit so that you could tell left from right on an image after calling xyz_ordered().

If it is pointing to MNI152 (or Talairach), then x=Left to Right, y=Posterior to Anterior, and z=Inferior to Superior. If not, you are not in MNI152. Moreover, according to the FSL docs, the whole 'anatomical' versus 'neurological' mess that I hear has been a long standing problem has nothing to do with the target frame of reference, but only with the way the data is stored.

I think the LPI designation simply specifies "x=Left to Right, y=Posterior to Anterior, and z=Inferior to Superior", so any MNI152 or Talairach image would be in LPI coordinates; that's all I'm trying to specify with the designation "LPI". If MNI152 might imply a certain voxel size, then I would prefer not to use MNI152.

If there's a better colour for the bike shed, then I'll let someone else paint it, :)

This LPI specification actually makes a difference to the "AffineImage/LPIImage.xyz_ordered" method. If, in the interest of being explicit, we would enforce the direction of x,y,z in LPI/Neuro/AffineImage, then the goal of having "xyz_ordered" return an image with an affine that has a diagonal with positive entries, as in the AffineImage specification, means that you might have to call

affine_image.get_data()[::-1,::-1] # or some other combination of flips

(i.e. you have to change how it is stored in memory).

The other way to return a diagonal affine with positive entries is to flip: send x to -x, y to -y, i.e. multiply the diagonal matrix by np.diag([-1,-1,1,1]) on the left. But then your AffineImage would now have "x=Right to Left, y=Anterior to Posterior" and we have lost the interpretation of x,y,z as LPI coordinates.
By being explicit about the direction of x,y,z, we know that if the affine matrix were diagonal with a negative entry in the first position, then left and right were flipped when viewed with a command like::

    >>> pylab.imshow(image.get_data()[:,:,10])

Without specifying the direction of x,y,z we just don't know.

You can of course create a new coordinate system describing, for instance, the scanner space, where the first coordinate is not x, and the second not y, ... but I am not sure what this means: x, y, and z, as well as left or right, are just names. The only important information between two coordinate systems is the transform linking them.

The sentence: "The only important information between two coordinate systems is the transform linking them." has, in one form or another, often been repeated in NiPy meetings, but no one bothers to define the terms in this sentence. So, I have to ask: what is your definition of "transform" and "coordinate system"? I have a precise definition, and the names are part of it.

Let's go through that sentence. Mathematically, if a transform is a function, then a transform knows its domain and its range, so it knows what the coordinate systems are. So yes, with transform defined as "function", if I give you a transform between two coordinate systems (mathematical spaces of some kind) the only important information about it is itself.

The problem is that, for a 4x4 matrix T, the python function

    transform_function = lambda v: np.dot(T, np.hstack([v,1]))[:3]

has a "duck-type" domain that knows nothing about image acquisition and a range inferred by numpy that knows nothing about LPI or MNI152. The string "coord_sys" in AffineImage is meant to imply that its domain and range say it should be interpreted in some way, but it is not explicit in AffineImage.

(Somewhere around here, I start veering off into documentation.... sorry.)

To me, a "coordinate system" is a basis for a vector space (sometimes you might want transforms between integers but ignore them for now). It's not even a description of an affine subspace of a vector space (see e.g. http://en.wikipedia.org/wiki/Affine_transformation). To describe such an affine subspace, "coordinate system" would need one more piece of information, the "constant" or "displacement" vector of the affine subspace.

Because it's a basis, each element in the basis can be identified by a name, so the transform depends on the names because that's how I determine a "coordinate system", and I need "coordinate systems" because they are what the domain and range of my "transform" are going to be. For instance, this describes the range "coordinate system" of a "transform" whose output is in LPI coordinates:

"x" = a unit vector of length 1mm pointing in the Left to Right direction
"y" = a unit vector of length 1mm pointing in the Posterior to Anterior direction
"z" = a unit vector of length 1mm pointing in the Inferior to Superior direction

OK, so that's my definition of "coordinate system" and the names are an important part of it.

Now for the "transform", which I will restrict to be an "affine transform". To me, this is an affine function or transformation between two vector spaces (we're not even considering affine transformations between affine spaces). I bring up the distinction because generally affine transforms act on affine spaces rather than vector spaces.
A vector space is an affine subspace of itself, with "displacement" vector given by its origin, hence it is an affine space and so we can define affine functions on vector spaces.

Because it is an affine function, the mathematical image of the domain under this function is an affine subspace of its range (which is a vector space). The "displacement" vector of this affine subspace is represented by the floats in b where A,b = to_matvec(T) (once I have specified a basis for the range of this function).

Since my "affine transform" is a function between two vector spaces, it should have a domain that is a vector space, as well. For the "affine transform" associated with an Image, this domain vector space has coordinates that can be interpreted as array coordinates, or coordinates in a "data cube". Depending on the acquisition parameters, these coordinates might have names like "phase", "freq", "slice".

Now, I can encode all this information in a tuple: (T=a 4x4 matrix of floats with bottom row [0,0,0,1], ('phase', 'freq', 'slice'), ('x','y','z'))

>>> import numpy as np
>>> from nipy.core.api import CoordinateSystem, AffineTransform
>>> acquisition = ('phase', 'freq', 'slice')
>>> xyz_world = ('x','y','z')
>>> T = np.array([[2,0,0,-91.095],[0,2,0,-129.51],[0,0,2,-73.25],[0,0,0,1]])
>>> AffineTransform(CoordinateSystem(acquisition), CoordinateSystem(xyz_world), T)
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('phase', 'freq', 'slice'), name='', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),
   affine=array([[   2.   ,    0.   ,    0.   ,  -91.095],
                 [   0.   ,    2.   ,    0.   , -129.51 ],
                 [   0.   ,    0.   ,    2.   ,  -73.25 ],
                 [   0.   ,    0.   ,    0.   ,    1.   ]])
)

The float64 appearing above is a way of specifying that the "coordinate systems" are vector spaces over the real numbers, rather than, say, the complex numbers. It is specified as an optional argument to CoordinateSystem.

Compare this to the way a MINC file is described::

    jtaylo@ubuntu:~$ mincinfo data.mnc
    file: data.mnc
    image: signed__ short -32768 to 32767
    image dimensions: zspace yspace xspace
        dimension name         length         step        start
        --------------         ------         ----        -----
        zspace                     84            2       -73.25
        yspace                    114            2      -129.51
        xspace                     92            2      -91.095
    jtaylo@ubuntu:~$
    jtaylo@ubuntu:~$ mincheader data.mnc
    netcdf data {
    dimensions:
        zspace = 84 ;
        yspace = 114 ;
        xspace = 92 ;
    variables:
        double zspace ;
            zspace:varid = "MINC standard variable" ;
            zspace:vartype = "dimension____" ;
            zspace:version = "MINC Version 1.0" ;
            zspace:comments = "Z increases from patient inferior to superior" ;
            zspace:spacing = "regular__" ;
            zspace:alignment = "centre" ;
            zspace:step = 2. ;
            zspace:start = -73.25 ;
            zspace:units = "mm" ;
        double yspace ;
            yspace:varid = "MINC standard variable" ;
            yspace:vartype = "dimension____" ;
            yspace:version = "MINC Version 1.0" ;
            yspace:comments = "Y increases from patient posterior to anterior" ;
            yspace:spacing = "regular__" ;
            yspace:alignment = "centre" ;
            yspace:step = 2. ;
            yspace:start = -129.509994506836 ;
            yspace:units = "mm" ;
        double xspace ;
            xspace:varid = "MINC standard variable" ;
            xspace:vartype = "dimension____" ;
            xspace:version = "MINC Version 1.0" ;
            xspace:comments = "X increases from patient left to right" ;
            xspace:spacing = "regular__" ;
            xspace:alignment = "centre" ;
            xspace:step = 2. ;
            xspace:start = -91.0950012207031 ;
            xspace:units = "mm" ;
        short image(zspace, yspace, xspace) ;
            image:parent = "rootvariable" ;
            image:varid = "MINC standard variable" ;
            image:vartype = "group________" ;
            image:version = "MINC Version 1.0" ;
            image:complete = "true_" ;
            image:signtype = "signed__" ;
            image:valid_range = -32768., 32767. ;
            image:image-min = "--->image-min" ;
            image:image-max = "--->image-max" ;
        int rootvariable ;
            rootvariable:varid = "MINC standard variable" ;
            rootvariable:vartype = "group________" ;
            rootvariable:version = "MINC Version 1.0" ;
            rootvariable:parent = "" ;
            rootvariable:children = "image" ;
        double image-min ;
            image-min:varid = "MINC standard variable" ;
            image-min:vartype = "var_attribute" ;
            image-min:version = "MINC Version 1.0" ;
            image-min:_FillValue = 0. ;
            image-min:parent = "image" ;
        double image-max ;
            image-max:varid = "MINC standard variable" ;
            image-max:vartype = "var_attribute" ;
            image-max:version = "MINC Version 1.0" ;
            image-max:_FillValue = 1. ;
            image-max:parent = "image" ;
    data:
        zspace = 0 ;
        yspace = 0 ;
        xspace = 0 ;
        rootvariable = _ ;
        image-min = -50 ;
        image-max = 50 ;
    }

I like the MINC description, but the one thing missing in this file is the ability to specify ('phase', 'freq', 'slice'). It may be possible to add it, but I'm not sure; it certainly can be added by adding a string to the header. It also mixes the definition of the basis with the affine transformation (look at the output of mincheader, which says that yspace has step 2). The NIFTI-1 standard allows limited possibilities to specify ('phase', 'freq', 'slice') with its dim_info byte, but there are pulse sequences for which these names are not appropriate.

One might ask: why bother making a "coordinate system" for the voxels? Well, this is part of my definition of "affine transform". More importantly, it separates the notion of world axes ('x','y','z') and voxel indices ('i','j','k'). There is at least one use case, slice timing, a key step in the fMRI pipeline, where we need to know which spatial axis is slice. One solution would be to just add an attribute to AffineImage called "slice_axis", but then, as Gael says, the possibilities for axis names are infinite; what if we want an attribute for "group_axis"? AffineTransform provides an easy way to specify an axis as "slice":

>>> unknown_acquisition = ('i','j','k')
>>> A = AffineTransform(CoordinateSystem(unknown_acquisition),
...                     CoordinateSystem(xyz_world), T)

After some deliberation, we find out that the third axis is slice...

>>> A.renamed_domain({'k':'slice'})
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'j', 'slice'), name='', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),
   affine=array([[   2.   ,    0.   ,    0.   ,  -91.095],
                 [   0.   ,    2.   ,    0.   , -129.51 ],
                 [   0.   ,    0.   ,    2.   ,  -73.25 ],
                 [   0.   ,    0.   ,    0.   ,    1.   ]])
)

Another question one might ask is: why bother allowing non-4x4 affine matrices like:

>>> AffineTransform.from_params('ij', 'xyz', np.array([[2,3,1,0],[3,4,5,0],[7,9,3,1]]).T)
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'j'), name='', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),
   affine=array([[2., 3., 7.],
                 [3., 4., 9.],
                 [1., 5., 3.],
                 [0., 0., 1.]])
)

For one, it allows very clear specification of a 2-dimensional plane (i.e. a 2-dimensional affine subspace of some vector space) called P, in, say, the LPI "coordinate system".
Let's say we want the plane in LPI-world corresponding to "j=30" for im above. (I guess that's coronal?)

Make an affine transform that maps (i,k) -> (i,30,k):

>>> j30 = AffineTransform(CoordinateSystem('ik'), CoordinateSystem('ijk'), np.array([[1,0,0],[0,0,30],[0,1,0],[0,0,1]]))
>>> j30
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'k'), name='', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64),
   affine=array([[  1.,   0.,   0.],
                 [  0.,   0.,  30.],
                 [  0.,   1.,   0.],
                 [  0.,   0.,   1.]])
)

Its dtype is np.float since we didn't specify np.int in constructing the CoordinateSystems:

>>> from nipy.core.api import compose
>>> j30_to_XYZ = compose(A, j30)
>>> j30_to_XYZ
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'k'), name='', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),
   affine=array([[  2.   ,   0.   , -91.095],
                 [  0.   ,   0.   , -69.51 ],
                 [  0.   ,   2.   , -73.25 ],
                 [  0.   ,   0.   ,   1.   ]])
)

This could be used to resample any RAS Image on the coronal plane y=-69.51 with voxels of size 2mm x 2mm starting at x=-91.095 and z=-73.25. Of course, this doesn't seem like a very natural slice. The module :mod:`nipy.core.reference.slices` has some convenience functions for specifying slices.

>>> from nipy.core.reference.slices import yslice, bounding_box
>>> x_spec = ([-92,92], 93) # voxels of size 2 in x, starting at -92, ending at 92
>>> z_spec = ([-70,100], 86) # voxels of size 2 in z, starting at -70, ending at 100

When specifying a *y* slice - we have to know what "y" means. In order for "y" to have meaning, we need to specify the name of an output (range) space that has a defined "y". In this case we use MNI space:

>>> y70 = yslice(70, x_spec, z_spec, 'mni')
>>> y70
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i_x', 'i_z'), name='slice', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64),
   affine=array([[  2.,   0., -92.],
                 [  0.,   0.,  70.],
                 [  0.,   2., -70.],
                 [  0.,   0.,   1.]])
)

>>> x_lims, y_lims, z_lims = bounding_box(y70, (x_spec[1], z_spec[1]))
>>> assert np.all(x_lims == (-92, 92))
>>> assert np.all(y_lims == (70, 70))
>>> assert np.all(z_lims == (-70, 100))

Maybe these aren't things that "normal human beings" (to steal a quote from Gael) can use, but they're explicit and they are tied to precise mathematical objects.

Email #2
---------

I apologize again for the long emails, but I'm glad we, as a group, are having this discussion electronically. Usually, our discussions of CoordinateMap begin with Matthew standing in front of a white board with a marker and asking a newcomer, "Are you familiar with the notion of a transformation, say, from voxel to world?" :) Where they go after that really depends on the kind of day everyone's having... :)

These last two emails also have the advantage that most of them can go right into doc/users/coordinate_map.rst.

I agree with Gael that LPIImage is an obscure name. OK. I already know that people often don't agree with names I choose, just ask Matthew. :) I just wanted to choose a name that is as explicit as possible. Since I'm neither a neuroscientist nor an MRI physicist but a statistician, I have no idea what it really means.
I found it mentioned in the link below, and John Ollinger mentioned LPI in another email thread:

http://afni.nimh.nih.gov/afni/community/board/read.php?f=1&i=9140&t=9140

I was suggesting we use a well-established term; apparently LPI is not well-established. :) Does LPS mean (left, posterior, superior)? Doesn't that suggest that LPI means (left, posterior, inferior) and RAI means (right, anterior, inferior)? If so, then good, now I know what LPI means and I'm not a neuroscientist or an MRI physicist, :)

We can call the images RASImages, or at least let's call their AffineTransform RASTransforms, or we could have NeuroImages that can only have RASTransforms or LPSTransforms, NeuroTransforms that have a property, and NeuroImage raises an exception like this::

   @property
   def world(self):
       return self.affine_transform.function_range

   if (self.world.name not in ['world-RAS', 'world-LPS'] or
           self.world.coord_names != ('x', 'y', 'z')):
       raise ValueError("the output space must be named one of "
                        "['world-RAS','world-LPS'] and "
                        "the axes must be ('x', 'y', 'z')")

   _doc['world'] = "World space, one of ['world-RAS', 'world-LPS']. If it is 'world-LPS', then x increases from patient's left to right, y increases posterior to anterior, z increases superior to inferior. If it is 'world-RAS' then x increases patient's right to left, y increases posterior to anterior, z increases superior to inferior."

I completely abdicate any responsibility for deciding which acronym to choose; someone who can use rope can just change every lpi/LPI to ras/RAS. I just want it explicit. I also want some version of these phrases "x increases from patient's right to left", "y increases from posterior to anterior", "z increases from superior to inferior" somewhere in a docstring for RAS/LPSTransform (see why I feel that "increasing vs. decreasing" is important below). I want the name and its docstring to scream at you what it represents, so there is no discussion like on the AFNI list where users are not sure which output of which program (in AFNI) should be flipped (see the other emails in the thread). It should be a subclass of AffineTransform because it has restrictions: namely, its range is 'xyz' and "xyz" can be interpreted in one of two ways (either RAS or LPS). You can represent any other version of RAS/LPS or (whatever colour your bike shed is, :)) with the same class, it just may have negative values on the diagonal. If it has some rotation applied, then it becomes pretty hard (at least for me) to decide if it's RAS or LPS from the 4x4 matrix of floats. I can't even tell you now when I look at the FIAC data which way left and right go unless I ask Matthew.

For background, you may want to look at what Gordon Kindlmann did for the nrrd format, where you can declare the space in which your orientation information and other transforms should be interpreted:

http://teem.sourceforge.net/nrrd/format.html#space

Or, if that's too flexible for you, you could adopt a standard space.

ITK chose LPS to match DICOM. For slicer, like nifti, we chose RAS.

It may be that there is a well-established convention for this, but then why does ITK say DICOM=LPS and AFNI say DICOM=RAI? At least MINC is explicit. I favor making it as precise as MINC does.

That AFNI discussion I pointed to uses the pairing RAI/DICOM and LPI/SPM. This discrepancy suggests there's some disagreement between using the letters to name the system and whether they mean increasing or decreasing. My guess is that LPI=RAS, based on ITK/AFNI's identifications of LPS=DICOM=RAI.
But I can't tell if the acronym LPI means "x is increasing L to R, y is increasing from P to A, z is increasing from I to S" - which would be equivalent to RAS meaning "x is decreasing from R to L, y is decreasing from A to P, z is decreasing from S to I". That is, I can't tell from the acronyms which of LPI or RAS is using "increasing" and which is "decreasing"; i.e. they could have flipped everything so that LPI means "x is decreasing L to R, y is decreasing P to A, z is decreasing I to S" and RAS means "x is increasing R to L, y is increasing A to P, z is increasing S to I".

To add more confusion to the mix, the acronym doesn't say if it is the patient's left to right or the technician's looking at him, :) For this, I'm sure there's a standard answer, and it's likely the patient, but heck, I'm just a statistician so I don't know the answer.

(every volume has an ijkToRAS affine transform). We convert to/from LPS when calling ITK code, e.g., for I/O.

How much clearer can you express "ijkToRAS" or "convert to/from LPS" than something like this:

>>> T = np.array([[2,0,0,-91.095],[0,2,0,-129.51],[0,0,2,-73.25],[0,0,0,1]])
>>> ijk = CoordinateSystem('ijk', 'voxel')
>>> RAS = CoordinateSystem('xyz', 'world-RAS')
>>> ijk_to_RAS = AffineTransform(ijk, RAS, T)
>>> ijk_to_RAS
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64),
   affine=array([[   2.   ,    0.   ,    0.   ,  -91.095],
                 [   0.   ,    2.   ,    0.   , -129.51 ],
                 [   0.   ,    0.   ,    2.   ,  -73.25 ],
                 [   0.   ,    0.   ,    0.   ,    1.   ]])
)

>>> LPS = CoordinateSystem('xyz', 'world-LPS')
>>> RAS_to_LPS = AffineTransform(RAS, LPS, np.diag([-1,-1,1,1]))
>>> ijk_to_LPS = compose(RAS_to_LPS, ijk_to_RAS)
>>> RAS_to_LPS
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-LPS', coord_dtype=float64),
   affine=array([[-1.,  0.,  0.,  0.],
                 [ 0., -1.,  0.,  0.],
                 [ 0.,  0.,  1.,  0.],
                 [ 0.,  0.,  0.,  1.]])
)
>>> ijk_to_LPS
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-LPS', coord_dtype=float64),
   affine=array([[  -2.   ,    0.   ,    0.   ,   91.095],
                 [   0.   ,   -2.   ,    0.   ,  129.51 ],
                 [   0.   ,    0.   ,    2.   ,  -73.25 ],
                 [   0.   ,    0.   ,    0.   ,    1.   ]])
)

Of course, we shouldn't rely on the name ijk_to_RAS to know that it is an ijk-to-RAS transform; that's why the coordinate systems are in the AffineTransform. I don't think anyone wants an attribute named "ijk_to_RAS" for AffineImage/Image/LPIImage.

The other problem that LPI/RAI/AffineTransform addresses is that someday you might want to transpose the data in your array and still have what you would call an "image". AffineImage allows this explicitly because there is no identifier for the domain of the AffineTransform (the attribute name "coord_sys" implies that it refers to either the domain or the range but not both). (Even those who share the sentiment that "everything that is important about the linking between two coordinate systems is contained in the transform" acknowledge there are two coordinate systems :))

Once you've transposed the array, say

>>> data = np.random.normal(size=(10, 12, 14)) # original array
>>> newdata = data.transpose([2,0,1])

You shouldn't use something called an "ijk_to_RAS" or "ijk_to_LPS" transform. Rather, you should use a "kij_to_RAS" or "kij_to_LPS" transform.
>>> ijk = CoordinateSystem('ijk', 'voxel')
>>> kij = CoordinateSystem('kij', 'voxel')
>>> ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]]))

Check that it does the right permutation:

>>> i, j, k = 10., 20., 40
>>> ijk_to_kij([i, j, k])
array([40., 10., 20.])

Yup, now let's try to make a kij_to_RAS transform. At first guess, we might try

>>> kij_to_RAS = compose(ijk_to_RAS, ijk_to_kij)
Traceback (most recent call last):
   ...
ValueError: domains and ranges don't match up correctly

We have a problem: we've asked for a composition that doesn't make sense.

If you're good with permutation matrices, you wouldn't have to call "compose" above; you could just do the matrix multiplication. But here the name of the function tells you that yes, you should use the inverse: "ijk_to_kij" says that the range consists of "kij" values, but to get a "transform" for your data in "kij" it should have a domain that is "kij". The call to compose raised an exception because it saw you were trying to compose a function with domain="ijk" and range="kij" with a function (on its left) having domain="ijk" and range="xyz": the range of the right-hand function does not match the domain of the left-hand one. This composition just doesn't make sense, so it raises an exception.

>>> kij_to_ijk = ijk_to_kij.inverse()
>>> kij_to_RAS = compose(ijk_to_RAS, kij_to_ijk)
>>> kij_to_RAS
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='voxel', coord_dtype=float64),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64),
   affine=array([[   0.   ,    2.   ,    0.   ,  -91.095],
                 [   0.   ,    0.   ,    2.   , -129.51 ],
                 [   2.   ,    0.   ,    0.   ,  -73.25 ],
                 [   0.   ,    0.   ,    0.   ,    1.   ]])
)

>>> ijk_to_RAS([i,j,k])
array([-71.095, -89.51 ,   6.75 ])
>>> kij_to_RAS([k,i,j])
array([-71.095, -89.51 ,   6.75 ])

We also shouldn't have to rely on the names of the AffineTransforms, i.e. ijk_to_RAS and kij_to_RAS, to remember what's what (in typing this example, I mixed up kij and kji many times). The objects ijk_to_RAS and kij_to_RAS represent the same "affine transform", as evidenced by their output above. There are lots of representations of the same "affine transform": (6=permutations of i,j,k)*(6=permutations of x,y,z)=36 matrices for one "affine transform".

If we throw in ambiguity about the sign in front of the output, there are 36*(8=2^3 possible flips of the x,y,z)=288 matrices possible, but there are only really 8 different "affine transforms". If you force the order of the range to be "xyz" then there are 6*8=48 different matrices possible, again only specifying 8 different "affine transforms". For AffineImage, if we were to allow both "LPS" and "RAS", this means two flips are allowed, namely either "LPS"=[-1,-1,1] or "RAS"=[1,1,1], so there are 6*2=12 possible matrices to represent 2 different "affine transforms".

Here's another example that uses sympy to show what's going on in the 4x4 matrix as you reorder the 'ijk' and the 'RAS'. (Note that this code won't work in general because I had temporarily disabled a check in CoordinateSystem that enforced the dtype of the array to be a builtin scalar dtype, for sanity's sake.) To me, each of A, A_kij and A_kij_yzx below represents the same "transform" because, if I substitute i=30, j=40, k=50 and I know the order of the 'xyz' in the output, then they will all give me the same answer.
>>> import sympy
>>> ijk = CoordinateSystem('ijk', coord_dtype=np.array(sympy.Symbol('x')).dtype)
>>> xyz = CoordinateSystem('xyz', coord_dtype=np.array(sympy.Symbol('x')).dtype)
>>> x_start, y_start, z_start = [sympy.Symbol(s) for s in ['x_start', 'y_start', 'z_start']]
>>> x_step, y_step, z_step = [sympy.Symbol(s) for s in ['x_step', 'y_step', 'z_step']]
>>> i, j, k = [sympy.Symbol(s) for s in 'ijk']
>>> T = np.array([[x_step,0,0,x_start],[0,y_step,0,y_start],[0,0,z_step,z_start],[0,0,0,1]])
>>> T
array([[x_step, 0, 0, x_start],
       [0, y_step, 0, y_start],
       [0, 0, z_step, z_start],
       [0, 0, 0, 1]], dtype=object)
>>> A = AffineTransform(ijk, xyz, T)
>>> A
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=object),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object),
   affine=array([[x_step, 0, 0, x_start],
                 [0, y_step, 0, y_start],
                 [0, 0, z_step, z_start],
                 [0, 0, 0, 1]], dtype=object)
)
>>> A([i,j,k]) == [x_start + i*x_step, y_start + j*y_step, z_start + k*z_step]
array([ True,  True,  True])

This is another:

>>> A_kij = A.reordered_domain('kij')
>>> A_kij
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object),
   affine=array([[0, 1.0*x_step, 0, 1.0*x_start],
                 [0, 0, 1.0*y_step, 1.0*y_start],
                 [1.0*z_step, 0, 0, 1.0*z_start],
                 [0.0, 0.0, 0.0, 1.0]], dtype=object)
)
>>> A_kij([k,i,j])
array([1.0*i*x_step + 1.0*x_start, 1.0*j*y_step + 1.0*y_start,
       1.0*k*z_step + 1.0*z_start], dtype=object)

Let's look at another reordering:

>>> A_kij_yzx = A_kij.reordered_range('yzx')
>>> A_kij_yzx
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object),
   function_range=CoordinateSystem(coord_names=('y', 'z', 'x'), name='', coord_dtype=object),
   affine=array([[0, 0, 1.0*y_step, 1.0*y_start],
                 [1.0*z_step, 0, 0, 1.0*z_start],
                 [0, 1.0*x_step, 0, 1.0*x_start],
                 [0, 0, 0, 1.00000000000000]], dtype=object)
)
>>> A_kij_yzx([k,i,j])
array([1.0*j*y_step + 1.0*y_start, 1.0*k*z_step + 1.0*z_start,
       1.0*i*x_step + 1.0*x_start], dtype=object)

>>> A_kij
AffineTransform(
   function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object),
   function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object),
   affine=array([[0, 1.0*x_step, 0, 1.0*x_start],
                 [0, 0, 1.0*y_step, 1.0*y_start],
                 [1.0*z_step, 0, 0, 1.0*z_start],
                 [0.0, 0.0, 0.0, 1.0]], dtype=object)
)

>>> from nipy.core.reference.coordinate_map import equivalent
>>> equivalent(A_kij, A)
True
>>> equivalent(A_kij, A_kij_yzx)
True

nipy-0.6.1/doc/devel/code_discussions/image_ordering.rst

.. _image_ordering:

Image index ordering
====================

Background
----------

In general, images - and in particular NIfTI format images - are ordered in memory with the X dimension changing fastest, and the Z dimension changing slowest. Numpy has two different ways of indexing arrays in memory, C and fortran. With C index ordering, the first index into an array indexes the slowest changing dimension, and the last indexes the fastest changing dimension. With fortran ordering, the first index refers to the fastest changing dimension - X in the case of the image mentioned above. C is the default index ordering for arrays in Numpy.
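As a quick illustration of the two conventions, here is a minimal sketch using plain numpy (the array values and shapes are invented for illustration)::

    import numpy as np

    # 24 values laid out linearly, as they would be in a file on disk
    buf = np.arange(24, dtype=np.float32)

    # C ordering: the *last* index moves fastest through the buffer
    c_arr = buf.reshape((2, 3, 4), order='C')
    # Fortran ordering: the *first* index moves fastest through the buffer
    f_arr = buf.reshape((2, 3, 4), order='F')

    assert c_arr[0, 0, 1] == 1  # stepping the last index moves 1 element in memory
    assert f_arr[1, 0, 0] == 1  # stepping the first index moves 1 element in memory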
For example, let's imagine that we have a binary block of 3D image data, in standard NIfTI / Analyze format, with the X dimension changing fastest, called `my.img`, containing Float32 data. Then we memory map it::

    img_arr = memmap('my.img', dtype=float32)

When we index this new array, the first index indexes the Z dimension, and the third indexes X. For example, if I want a voxel X=3, Y=10, Z=20 (zero-based), I have to get this from the array with::

    img_arr[20, 10, 3]

The problem
-----------

Most potential users of NiPy are likely to have experience of using image arrays in Matlab and SPM. Matlab uses Fortran index ordering. For fortran, the first index is the fastest changing, and the last is the slowest-changing. For example, here is how to get voxel X=3, Y=10, Z=20 (zero-based) using SPM in Matlab::

    img_arr = spm_read_vols(spm_vol('my.img'));
    img_arr(4, 11, 21)  % matlab indexing is one-based

This ordering fits better with the way that we talk about coordinates in functional imaging, as we invariably use XYZ ordered coordinates in papers. It is possible to do the same in numpy, by specifying that the image should have fortran index ordering::

    img_arr = memmap('my.img', dtype=float32, order='F')
    img_arr[3, 10, 20]

Native fortran or C indexing for images
---------------------------------------

We could change the default ordering of image arrays to fortran, in order to allow XYZ index ordering. So, change the access to the image array in the image class so that, to get the voxel at X=3, Y=10, Z=20 (zero-based)::

    img = load_image('my.img')
    img[3, 10, 20]

instead of the current situation, which requires::

    img = load_image('my.img')
    img[20, 10, 3]

For and against fortran ordering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For:

* Fortran index ordering is more intuitive for functional imaging because of conventional XYZ ordering of spatial coordinates, and Fortran index ordering in packages such as Matlab
* Indexing into a raw array is fast, and common in lower-level applications, so it would be useful to implement the more intuitive XYZ ordering at this level rather than via interpolators (see below)
* Standardizing to one index ordering (XYZ) would mean users would not have to think about the arrangement of the image in memory

Against:

* C index ordering is more familiar to C users
* C index ordering is the default in numpy
* XYZ ordering can be implemented by wrapping with an interpolator

Note that there is no performance penalty for either array ordering, as this is dealt with internally by NumPy. For example, imagine the following::

    arr = np.empty((100,50)) # Indexing is C by default
    arr2 = arr.transpose() # Now it is fortran
    # There should be no effective difference in speed for the next two lines
    b = arr[0] # get first row of data - contiguous memory
    c = arr2[:,0] # gets same data, from the same contiguous memory

Potential problems for fortran ordering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clash between default ordering of numpy arrays and nipy images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

C index ordering is the default in numpy, and using fortran ordering for images might be confusing in some circumstances.
Consider for example::

    img_obj = load_image('my.img')
    # Where the Image class has been changed to implement Fortran ordering
    first_z_slice = img_obj[...,0]  # returns a Z slice

    img_arr = memmap('my.img', dtype=float32)  # C ordering, the numpy default
    img_obj = Image.from_array(img_arr)  # this call may not be correct
    first_z_slice = img_obj[...,0]  # in fact returns an X slice

I suppose that we could check that arrays are fortran index ordered in the Image __init__ routine.

An alternative proposal - XYZ ordering of output coordinates
------------------------------------------------------------

JT: Another thought, that is a compromise between the XYZ coordinates and Fortran ordering.

To me, having worked mostly with C-type arrays, when I index an array I think in C terms. But the Image objects have the "warp" attached to them, which describes the output coordinates. We could insist that the output coordinates are XYZT (or make this an option). So, for instance, if the 4x4 transform was the identity, the following two calls would give something like::

    >>> interp = interpolator(img)
    >>> img[3,4,5] == interp(5,4,3)
    True

This way, users would be sure in the interpolator of the order of the coordinates, but users who want access to the array would know that they would be using the array order on disk...

I see that a lot of users will want to think of the first coordinate as "x", but depending on the sampling, the [0] slice of img may be the leftmost or the rightmost. To find out which is which, users will have to look at the 4x4 transform (or equivalently the start and the step). So just knowing that the first array coordinate is the "x" coordinate still misses some information, all of which is contained in the transform.

MB replied:

I agree that the output coordinates are very important - and I think we all agree that this should be XYZ(T)?

For the raw array indices - it is very common for people to want to do things to the raw image array (the quickstart examples contain a few), and you usually don't care about which end of X is left in that situation, only which spatial (etc.) dimension the index refers to.

nipy-0.6.1/doc/devel/code_discussions/index.rst

.. _code-discussions:

================
Code discussions
================

These are some developer discussions about design of code in NIPY.

.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   understanding_affines
   image_ordering
   registration_api
   repository_design
   brainvisa_repositories
   repository_api
   pipelining_api
   simple_viewer
   usecases/index
   refactoring/index
   comparisons/index

nipy-0.6.1/doc/devel/code_discussions/pipelining_api.rst

.. _pipelining_api:

==================================
 What would pipelining look like?
==================================

Imagine a repository that is a modified version of the one in :ref:`repository_api`.

Then::

    my_repo = SubjectRepository('/some/structured/file/system')
    my_designmaker = MyDesignParser() # Takes parameters from subject to create design
    my_pipeline = Pipeline([
       realignerfactory('fsl'),
       slicetimerfactory('nipy', 'linear'),
       coregisterfactory('fsl', 'flirt'),
       normalizerfactory('spm'),
       filterfactory('nipy', 'smooth', 8),
       designfactory('nipy', my_designmaker),
       ])

    my_analysis = SubjectAnalysis(my_repo, subject_pipeline=my_pipeline)
    my_analysis.do()
    my_analysis.archive()

nipy-0.6.1/doc/devel/code_discussions/refactoring/imagelists.rst

========================
 Refactoring imagelists
========================

Usecases for ImageList
======================

Thus far only used in anger in :mod:`nipy.modalities.fmri.fmristat.model`, and similarly in :mod:`nipy.modalities.fmri.spm.model`.

From that file, an object ``obj`` of class :class:`FmriImageList` must:

* return a 4D array from ``np.asarray(obj)``, such that the first axis (axis 0) is the axis over which the model is applied
* be indexable such that ``obj[0]`` returns an Image instance, with valid ``shape`` and ``coordmap`` attributes for a time-point 3D volume in the 4D time-series.
* have an attribute ``volume_start_times`` giving times of the start of each of the volumes in the 4D time series.
* return the number of volumes in the time-series from ``len(obj)``

nipy-0.6.1/doc/devel/code_discussions/refactoring/index.rst

.. _refactoring_index:

======================
 Defining use cases
======================

.. toctree::
   :maxdepth: 2

   imagelists

nipy-0.6.1/doc/devel/code_discussions/registration_api.rst

=========================
 Registration API Design
=========================

This contains design ideas for the end-user API when registering images in nipy.

We want to provide a simple API, but with enough flexibility to allow users to change various components of the pipeline. We will also provide various **Standard** scripts that perform typical pipelines.

The pluggable script::

    func_img = load_image(filename)
    anat_img = load_image(filename)
    interpolator = SplineInterpolator(order=3)
    metric = NormalizedMutualInformation()
    optimizer = Powell()
    strategy = RegistrationStrategy(interpolator, metric, optimizer)
    w2w = strategy.apply(img_fixed, img_moving)

To apply the transform and resample the image::

    new_img = resample(img_moving, w2w, interp=interpolator)

Or::

    new_img = Image(img_moving, w2w*img_moving.coordmap)

Transform Multiplication
------------------------

The multiplication order is important and coordinate systems must *make sense*. The *output coordinates* of the mapping on the right-hand side of the operator must match the *input coordinates* of the mapping on the left-hand side of the operator.

For example, imageA has a mapping from voxels-to-world (v2w), and imageB has a mapping from world-to-world (w2w). So the output of imageA, *world*, maps to the input of imageB, *world*.
We would compose a new mapping (transform) from these mappings like this::

    new_coordmap = imageB.coordmap * imageA.coordmap

If one tried to compose a mapping in the other order, an error should be raised, as the code would detect a mismatch: it would be trying to map output coordinates from imageB, *world*, to the input coordinates of imageA, *voxels*::

    new_coordmap = imageA.coordmap * imageB.coordmap
    raise ValueError!!!

Note: we should provide a meaningful error message to help people quickly correct this mistake.

One way to remember this ordering is to think of composing functions. If these were functions, the output of the first function to evaluate (imageA.coordmap) is passed as input to the second function (imageB.coordmap), and therefore they must match::

    new_coordmap = imageB.coordmap(imageA.coordmap())

Matching Coordinate Systems
---------------------------

We need to make sure we can detect mismatched coordinate mappings. The CoordinateSystem class has a check for equality (``__eq__`` method) based on the axis and name attributes. Long-term this may not be robust enough, but it's a starting place. We should write tests for failing cases of this, if they don't already exist.
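For instance, here is a minimal sketch of the kind of mismatch this check can catch; the system names are chosen just for illustration::

    from nipy.core.api import CoordinateSystem

    voxels = CoordinateSystem('ijk', 'voxel')
    world = CoordinateSystem('xyz', 'world')

    # Same coordinate names and system name -> considered equal
    assert CoordinateSystem('ijk', 'voxel') == voxels
    # Different names -> not equal, so composition code can refuse
    # to chain mismatched mappings
    assert voxels != world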
CoordinateMap
-------------

Recall that a CoordinateMap defines a mapping between two coordinate systems, an input coordinate system and an output coordinate system. One example of this would be a mapping from voxel space to scanner space. In a Nifti1 header we would have an affine transform to apply this mapping. The *input coordinates* would be voxel space, the *output coordinates* would be world space, and the affine transform provides the mapping between them.

nipy-0.6.1/doc/devel/code_discussions/repository_api.rst

.. _repository_api:

Repository API
==============

See also :ref:`repository_design` and :ref:`brainvisa_repositories`

FMRI datasets often have the structure:

* Group (sometimes) e.g. Patients, Controls
* Subject e.g. Subject1, Subject2
* Session e.g. Sess1, Sess2

How about an interface like::

    repo = GSSRepository(
        root_dir='/home/me/data/experiment1',
        groups={'patients':
                    {'subjects':
                         {'patient1': {'sess1': {'filter': 'raw*nii'},
                                       'sess2': {'filter': 'raw*nii'}},
                          'patient2': {'sess1': {'filter': 'raw*nii'},
                                       'sess2': {'filter': 'raw*nii'}}}},
                'controls':
                    {'subjects':
                         {'control1': {'sess1': {'filter': 'raw*nii'},
                                       'sess2': {'filter': 'raw*nii'}},
                          'control2': {'sess1': {'filter': 'raw*nii'},
                                       'sess2': {'filter': 'raw*nii'}}}}})

    for group in repo.groups:
        for subject in group.subjects:
            for session in subject.sessions:
                img = session.image
                # do something with image

We would need to think about adding metadata such as behavioral data from the scanning session, and so on. I suppose this will help us move transparently to using something like HDF5 for data storage.

nipy-0.6.1/doc/devel/code_discussions/repository_design.rst

.. _repository_design:

===================
 Repository design
===================

See also :ref:`repository_api` and :ref:`brainvisa_repositories`

For the NIPY system, there seems to be interest in the following:

* Easy distributed computing
* Easy scripting, replicating the same analysis on different data
* Flexibility - ease of interoperation with other brain imaging systems

At a minimum, this seems to entail the following requirements for the NIPY repository system:

* Unique identifiers of data, which can be abstracted from the most local or convenient data storage
* A mechanism for mapping the canonical data model(s) from NIPY to an arbitrary, and potentially even inconsistent, repository structure
* A set of semantic primitives / metadata slots, enabling for example:

  * "all scans from this subject"
  * "the first scan from every subject in the control group"
  * "V1 localizer scans from all subjects"
  * "Extract the average timecourse for each subject from the ROI defined by all voxels with t > 0.005 in the V1 localizer scan for that subject"

These problems are not unique to the problem of brain imaging data, and in many cases have been treated in the domains of database design, geospatial and space telescope data, and the semantic web. Technologies of particular interest include:

* HDF5 - the basis of MINC 2.0 (and potentially NIfTI 2), the most recent development in the more general CDF / HDF series (and very highly regarded). There are excellent Python bindings available in PyTables.
* Relational database design - it would be nice to efficiently select data based on any arbitrary subset of attributes associated with that data.
* The notion of a URI, developed under the guidance of the W3C. Briefly, a URI consists of:

  * An authority (i.e. a domain name controlled by a particular entity)
  * A path - a particular resource specified by that authority
  * Abstraction from storage (as opposed to a URL) - a URI does not necessarily include the information necessary for retrieving the data referred to, though it may.

* Ways of dealing with hierarchical data as developed in the XML field (though these strategies could be implemented potentially in other hierarchical data formats - even filesystems).

Note that incorporation of any of the above ideas does not require the use of the actual technology referenced. For example, relational queries can be made in PyTables in many cases **more efficiently** than in a relational database by storing everything in a single denormalized table. This data structure tends to be more efficient than the equivalent normalized relational database format in the cases where a single data field is much larger than the others (as is the case with the data array in brain imaging data). That said, adherence to standards allows us to leverage existing code which may be tuned to a degree that would be beyond the scope of this project (for example, fast XPath query libraries, as made available via lxml in Python).
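To make the denormalized-table idea above concrete, here is a hedged sketch using PyTables; the field names, column sizes and the (tiny) data shape are invented for illustration::

    import numpy as np
    import tables  # PyTables

    class ScanRecord(tables.IsDescription):
        # One denormalized row per scan; metadata columns sit next to the
        # (much larger) data column.
        group = tables.StringCol(16)     # e.g. 'patients' / 'controls'
        subject = tables.StringCol(16)
        task = tables.StringCol(32)      # e.g. 'V1 localizer'
        data = tables.Float32Col(shape=(4, 4, 3))  # the large data field

    h5 = tables.open_file('experiment1.h5', mode='w')
    scans = h5.create_table('/', 'scans', ScanRecord)

    row = scans.row
    row['group'], row['subject'], row['task'] = 'controls', 'control1', 'V1 localizer'
    row['data'] = np.zeros((4, 4, 3), dtype=np.float32)
    row.append()
    scans.flush()

    # Relational-style selection on attributes, without a relational database
    for rec in scans.read_where('(group == b"controls") & (task == b"V1 localizer")'):
        print(rec['subject'])
    h5.close()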
_understanding_affines: ============================================= Understanding voxel and real world mappings ============================================= Voxel coordinates and real-world coordinates ---------------------------------------------- A point can be represented by coordinates relative to specified axes. coordinates are (almost always) numbers - see `coordinate systems `_ For example, a map grid reference gives a coordinate (a pair of numbers) to a point on the map. The numbers give the respective positions on the horizontal (``x``) and vertical (``y``) axes of the map. A coordinate system is defined by a set of axes. In the example above, the axes are the ``x`` and ``y`` axes. Axes for coordinates are usually orthogonal - for example, moving one unit up on the ``x`` axis on the map causes no change in the ``y`` coordinate - because the axes are at 90 degrees. In this discussion we'll concentrate on the three dimensional case. Having three dimensions means that we have a three axis coordinate system, and coordinates have three values. The meaning of the values depend on what the axes are. Voxel coordinates ````````````````` Array indexing is one example of using a coordinate system. Let's say we have a three dimensional array:: A = np.arange(24).reshape((2,3,4)) The value ``0`` is at array coordinate ``0,0,0``:: assert A[0,0,0] == 0 and the value ``23`` is at array coordinate ``1,2,3``:: assert A[1,2,3] == 23 (remembering python's zero-based indexing). If we now say that our array is a 3D volume element array - an array of voxels, then the array coordinate is also a voxel coordinate. If we want to use ``numpy`` to index our array, then we need integer voxel coordinates, but if we use a resampling scheme, we can also imagine non-integer voxel coordinates for ``A``, such as ``(0.6,1.2,1.9)``, and we could use resampling to estimate the value at such a coordinate, given the actual data in the surrounding (integer) points. Array / voxel coordinates refer to the array axes. Without any further information, they do not tell us about where the point is in the real world - the world we can measure with a ruler. We refer to array / voxel coordinates with indices ``i, j, k``, where ``i`` is the first value in the 3 value coordinate tuple. For example, if array / voxel point ``(1,2,3)`` has ``i=1, j=2, k=3``. We'll be careful only to use ``i, j, k`` rather than ``x, y, z``, because we are going to use ``x, y, z`` to refer to real-world coordinates. Real-world coordinates `````````````````````` Real-world coordinates are coordinates where the values refer to real-world axes. A real-world axis is an axis that refers to some real physical space, like low to high position in an MRI scanner, or the position in terms of the subject's head. Here we'll use the usual neuroimaging convention, and that is to label our axes relative to the subject's head: * ``x`` has negative values for left and positive values for right * ``y`` has negative values for posterior (back of head) and positive values for anterior (front of head) * ``z`` has negative values for the inferior (towards the neck) and positive values for superior (towards the highest point of the head, when standing) Image index ordering -------------------- Background `````````` In general, images - and in particular NIfTI format images, are ordered in memory with the X dimension changing fastest, and the Z dimension changing slowest. Numpy has two different ways of indexing arrays in memory, C and fortran. 
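The difference is easy to see directly in ``numpy`` - a quick sketch (standard ``numpy`` only; the printed strides assume 8-byte floats)::

    import numpy as np

    c_arr = np.zeros((2, 3, 4), order='C')  # numpy's default ordering
    f_arr = np.zeros((2, 3, 4), order='F')
    # Strides in bytes: C ordering makes the *last* axis contiguous in
    # memory, Fortran ordering makes the *first* axis contiguous.
    print(c_arr.strides)  # (96, 32, 8)
    print(f_arr.strides)  # (8, 16, 48)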
With C index ordering, the first index into an array indexes the slowest changing dimension, and the last indexes the fastest changing dimension. With fortran ordering, the first index refers to the fastest changing dimension - X in the case of the image mentioned above. C is the default index ordering for arrays in Numpy. For example, let's imagine that we have a binary block of 3D image data, in standard NIfTI / Analyze format, with the X dimension changing fastest, called `my.img`, containing Float32 data. Then we memory map it:: img_arr = memmap('my.img', dtype=float32) When we index this new array, the first index indexes the Z dimension, and the third indexes X. For example, if I want a voxel X=3, Y=10, Z=20 (zero-based), I have to get this from the array with:: img_arr[20, 10, 3] The problem ``````````` Most potential users of NiPy are likely to have experience of using image arrays in Matlab and SPM. Matlab uses Fortran index ordering. For fortran, the first index is the fastest changing, and the last is the slowest-changing. For example, here is how to get voxel X=3, Y=10, Z=20 (zero-based) using SPM in Matlab:: img_arr = spm_read_vols(spm_vol('my.img')); img_arr(4, 11, 21) % matlab indexing is one-based This ordering fits better with the way that we talk about coordinates in functional imaging, as we invariably use XYZ ordered coordinates in papers. It is possible to do the same in numpy, by specifying that the image should have fortran index ordering:: img_arr = memmap('my.img', dtype=float32, order='F') img_arr[3, 10, 20] The proposal ```````````` Change the default ordering of image arrays to fortran, in order to allow XYZ index ordering. So, change the access to the image array in the image class so that, to get the voxel at X=3, Y=10, Z=20 (zero-based):: img = Image('my.img') img[3, 10, 20] instead of the current situation, which requires:: img = Image('my.img') img[20, 10, 3] Summary of discussion ````````````````````` For: * Fortran index ordering is more intuitive for functional imaging because of conventional XYZ ordering of spatial coordinates, and Fortran index ordering in packages such as Matlab * Indexing into a raw array is fast, and common in lower-level applications, so it would be useful to implement the more intuitive XYZ ordering at this level rather than via interpolators (see below) * Standardizing to one index ordering (XYZ) would mean users would not have to think about the arrangement of the image in memory Against: * C index ordering is more familiar to C users * C index ordering is the default in numpy * XYZ ordering can be implemented by wrapping by an interpolator Potential problems `````````````````` Performance penalties ^^^^^^^^^^^^^^^^^^^^^ KY commented:: This seems like a good idea to me but I have no knowledge of numpy internals (and even less than none after the numeric/numarray integration). Does anyone know if this will (or definitely will not) incur any kind of obvious performance penalties re. array operations (sans arcane problems like stride issues in huge arrays)? MB replied: Note that, we are not proposing to change the memory layout of the image, which is fixed by the image format in e.g NIfTI, but only to index it XYZ instead of ZYX. As far as I am aware, there are no significant performance differences between:: img_arr = memmap('my.img', dtype=float32, order='C') img_arr[5,4,3] and:: img_arr = memmap('my.img', dtype=float32, order='F') img_arr[3,4,5] Happy to be corrected though. 
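One way to check (a sketch - exact numbers will vary with machine, numpy version and array size) is to time element access for the two orderings::

    import timeit

    import numpy as np

    data = np.random.normal(size=(64, 64, 30))
    c_arr = np.ascontiguousarray(data)  # C (default) index ordering
    f_arr = np.asfortranarray(data)     # Fortran index ordering
    for label, arr in (('C', c_arr), ('F', f_arr)):
        t = timeit.timeit(lambda: arr[5, 4, 3], number=100000)
        print(label, t)

Single-element indexing like this should not depend on the memory layout; operations that sweep along a whole axis are where the layout can matter.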
Clash between default ordering of numpy arrays and nipy images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

C index ordering is the default in numpy, and using fortran ordering for images might be confusing in some circumstances.  Consider for example::

    img_obj = Image('my.img') # Where the Image class has been changed to implement Fortran ordering
    first_z_slice = img_obj[...,0] # returns a Z slice

    img_arr = memmap('my.img', dtype=float32) # C ordering, the numpy default
    img_obj = Image(img_arr)
    first_z_slice = img_obj[...,0] # in fact returns an X slice

I suppose that we could check that arrays are fortran index ordered in the Image __init__ routine.

An alternative proposal - XYZ ordering of output coordinates
````````````````````````````````````````````````````````````

JT: Another thought, that is a compromise between the XYZ coordinates and Fortran ordering.

To me, having worked mostly with C-type arrays, when I index an array I think in C terms.  But, the Image objects have the "warp" attached to them, which describes the output coordinates.  We could insist that the output coordinates are XYZT (or make this an option).  So, for instance, if the 4x4 transform was the identity, the following two calls would give something like::

    interp = interpolator(img)
    img[3,4,5] == interp(5,4,3)

This way, users would be sure in the interpolator of the order of the coordinates, but users who want access to the array would know that they would be using the array order on disk...

I see that a lot of users will want to think of the first coordinate as "x", but depending on the sampling the [0] slice of img may be the leftmost or the rightmost.  To find out which is which, users will have to look at the 4x4 transform (or equivalently the start and the step).  So just knowing the first array coordinate is the "x" coordinate still misses some information, all of which is contained in the transform.

MB replied:

I agree that the output coordinates are very important - and I think we all agree that this should be XYZ(T)?

For the raw array indices - it is very common for people to want to do things to the raw image array - the quickstart examples contain a few - and you usually don't care about which end of X is left in that situation, only which spatial etc dimension the index refers to.
nipy-0.6.1/doc/devel/code_discussions/usecases/000077500000000000000000000000001470056100100215065ustar00rootroot00000000000000nipy-0.6.1/doc/devel/code_discussions/usecases/batching.rst000066400000000000000000000004711470056100100240210ustar00rootroot00000000000000.. _batching:

==================
Batching use cases
==================

Using the nipy_ framework for creating scripts to process whole datasets, for example movement correction, coregistration of functional to structural (intermodality), smoothing, statistics, inference.

.. include:: ../../../links_names.txt
nipy-0.6.1/doc/devel/code_discussions/usecases/images.rst000066400000000000000000000117571470056100100235140ustar00rootroot00000000000000.. _image_usecases:

=======================
 Image model use cases
=======================

In which we lay out the various things that users and developers may want to do to images.
See also :ref:`resampling`

Taking a mean over a 4D image
=============================

We could do this much more simply than below, this is just an example of reducing over a particular axis::

    # take mean of 4D image
    from glob import glob

    import numpy as np

    import nipy as ni

    fname = 'some4d.nii'
    img_list = ni.load_list(fname, axis=3)
    vol0 = img_list[0]
    arr = vol0.array[:]
    for vol in img_list[1:]:
        arr += vol.array
    arr /= len(img_list)  # divide the sum by N to get the mean
    mean_img = ni.Image(arr, vol0.coordmap)
    ni.save(mean_img, 'mean_some4d.nii')

Taking mean over series of 3D images
====================================

Just to show how this works with a list of images::

    # take mean of some PCA volumes
    fnames = glob('some3d*.nii')
    vol0 = ni.load(fnames[0])
    arr = vol0.array[:]
    for fname in fnames[1:]:
        vol = ni.load(fname)
        arr += vol.array
    arr /= len(fnames)  # again - divide the sum by N for the mean
    mean_img = ni.Image(arr, vol0.coordmap)
    ni.save(mean_img, 'mean_some3ds.nii')

Simple motion correction
========================

This is an example of how the 4D -> list of 3D interface works::

    # motion correction
    img_list = ni.load_list(fname, axis=3)
    reggie = ni.interfaces.fsl.Register(tol=0.1)
    vol0 = img_list[0]
    mocod = []   # unresliced
    rmocod = []  # resliced
    for vol in img_list[1:]:
        rcoord_map = reggie.run(moving=vol, fixed=vol0)
        cmap = ni.ref.compose(rcoord_map, vol.coordmap)
        mocovol = ni.Image(vol.array, cmap)
        # But...
        try:
            a_vol = ni.Image(vol.array, rcoord_map)
        except CoordmapError as msg:
            assert str(msg) == 'need coordmap with voxel input'
        mocod.append(mocovol)
        rmocovol = ni.reslice(mocovol, vol0)
        rmocod.append(rmocovol)
    rmocod_img = ni.list_to_image(rmocod)
    ni.save(rmocod_img, 'rsome4d.nii')
    try:
        mocod_img = ni.list_to_image(mocod)
    except ImageListError:
        print('That is what I thought; the transforms were not the same')

Slice timing
============

Here putting 3D image into an image list, and back into a 4D image / array::

    # slice timing
    img_list = ni.load_list(fname, axis=2)
    slicetimer = ni.interfaces.fsl.SliceTime(algorithm='linear')
    vol0 = img_list[0]
    try:
        vol0.timestamp
    except AttributeError:
        print('we do not have a timestamp')
    try:
        vol0.slicetimes
    except AttributeError:
        print('we do not have slicetimes')
    try:
        st_list = slicetimer.run(img_list)
    except SliceTimeError as msg:
        assert str(msg) == 'no timestamp for volume'
    TR = 2.0
    slicetime = 0.15
    sliceaxis = 2
    nslices = vol0.array.shape[sliceaxis]
    slicetimes = np.arange(nslices) * slicetime
    timestamps = np.arange(len(img_list)) * TR
    # Either the images are in a simple list
    for i, img in enumerate(img_list):
        img.timestamp = timestamps[i]
        img.slicetimes = slicetimes
        img.axis['slice'] = sliceaxis # note setting of voxel axis meaning
    # if the sliceaxes do not match, error when run
    img_list[0].axis['slice'] = 1
    try:
        st_list = slicetimer.run(img_list)
    except SliceTimeError as msg:
        assert str(msg) == 'images do not have the same sliceaxes'
    # Or - with ImageList object
    img_list.timestamps = timestamps
    img_list.slicetimes = slicetimes
    img_list.axis['slice'] = sliceaxis
    # Either way, we run and save
    st_list = slicetimer.run(img_list)
    ni.save(ni.list_to_image(st_list), 'stsome4d.nii')

Creating an image given data and affine
=======================================

Showing how we would like the image creation API to look::

    # making an image from an affine
    data = img.array
    affine = np.eye(4)
    scanner_img = ni.Image(data, ni.ref.voxel2scanner(affine))
    mni_img = ni.Image(data, ni.ref.voxel2mni(affine))

Coregistration / normalization
==============================

Demonstrating coordinate maps and non-linear resampling::

    # coregistration and normalization
    anat_img = ni.load_image('anatomical.nii')
    func_img = ni.load_image('epi4d.nii')
    template = ni.load_image('mni152T1.nii')

    # coreg
    coreger = ni.interfaces.fsl.flirt(tol=0.2)
    coreg_cmap = coreger.run(fixed=func_img, moving=anat_img)
    c_anat_img = ni.Image(anat_img.data, coreg_cmap.compose_with(anat_img.cmap))

    # calculate normalization parameters
    template_cmap = template.coordmap
    template_dims = template.data.shape
    c_anat_cmap = c_anat_img.coordmap
    normalizer = ni.interfaces.fsl.fnirt(param=3)
    norm_cmap = normalizer.run(moving=template, fixed=c_anat_img)

    # resample anatomical using calculated coordinate map
    full_cmap = norm_cmap.compose_with(template_cmap)
    w_anat_data = c_anat_img.resliced_to_grid(full_cmap, template_dims)
    w_anat_img = ni.Image(w_anat_data, template.coordmap)

    # resample functionals with calculated coordinate map
    w_func_list = []
    for img in ni.image_list(func_img, axis=3):
        w_img_data = img.resliced_to_grid(full_cmap, template_dims)
        w_func_list.append(ni.Image(w_img_data, template_cmap))
    ni.save(ni.list_to_image(w_func_list), 'wsome4d.nii')
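The sketches above use a hypothetical ``ni`` API.  For comparison, here is a working version of the first use case (mean of a 4D image) using only nibabel and numpy as they exist today::

    import nibabel as nib

    img = nib.load('some4d.nii')
    # Mean over the last (time) axis of the floating point data.
    mean_data = img.get_fdata().mean(axis=-1)
    mean_img = nib.Nifti1Image(mean_data, img.affine)
    nib.save(mean_img, 'mean_some4d.nii')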
nipy-0.6.1/doc/devel/code_discussions/usecases/index.rst000066400000000000000000000002561470056100100233520ustar00rootroot00000000000000.. _usecases_index:

======================
 Defining use cases
======================

.. toctree::
   :maxdepth: 2

   transformations
   images
   resampling
   batching
nipy-0.6.1/doc/devel/code_discussions/usecases/resampling.rst000066400000000000000000000002201470056100100243730ustar00rootroot00000000000000.. _resampling:

=======================
 Resampling use cases
=======================

Use cases for image resampling.  See also :ref:`images`.
nipy-0.6.1/doc/devel/code_discussions/usecases/transformations.rst000066400000000000000000000175121470056100100254770ustar00rootroot00000000000000.. _transformations:

==========================
 Transformation use cases
==========================

Use cases for defining and using transforms on images.

We should be very careful to only use the terms ``x, y, z`` to refer to physical space.  For voxels, we should use ``i, j, k``, or ``i', j', k'`` (i prime, j prime, k prime).

I have an image *Img*.

Image Orientation
-----------------

I would like to know what the voxel sizes are.

I would like to determine whether it was acquired axially, coronally or sagittally.  What is the brain orientation in relation to the voxels?  Has it been acquired at an oblique angle?  What are the voxel dimensions?::

    img = load_image(file)
    cm = img.coordmap
    print(cm)

    input_coords
        axis_i:
        axis_j:
        axis_k:

    effective pixel dimensions
        axis_i: 4mm
        axis_j: 2mm
        axis_k: 2mm

    input/output mapping
            x  y  z
        ------------
        i|  90 90  0
        j|  90  0 90
        k| 180 90 90

        input axis_i maps exactly to output axis_z
        input axis_j maps exactly to output axis_y
        input axis_k maps exactly to output axis_x flipped 180

    output_coords
        axis0: Left -> Right
        axis1: Posterior -> Anterior
        axis2: Inferior -> Superior

In the case of a mapping that does not exactly align the input and output axes, something like::

    ...
    input/output mapping
        input axis0 maps closest to output axis2
        input axis1 maps closest to output axis1
        input axis2 maps closest to output axis0
    ...

If the best matching axis is reversed compared to input axis::

    ...
    input axis0 maps [closest|exactly] to negative output axis2
    ...

and so on.

Creating transformations / coordinate maps
-------------------------------------------

I have an array *pixelarray* that represents voxels in an image and have a matrix/transform *mat* which represents the relation between the voxel coordinates and the coordinates in scanner space (world coordinates).

I want to associate the array with the matrix::

    img = load_image(infile)
    pixelarray = np.asarray(img)

(*pixelarray* is an array and does not have a coordinate map.)::

    pixelarray.shape
    (40,256,256)

So, now I have some arbitrary transformation matrix::

    mat = np.zeros((4,4))
    mat[0,2] = 2 # giving x mm scaling
    mat[1,1] = 2 # giving y mm scaling
    mat[2,0] = 4 # giving z mm scaling
    mat[3,3] = 1 # because it must be so
    # Note inverse diagonal for zyx->xyz coordinate flip

I want to make an ``Image`` with these two::

    coordmap = voxel2mm(pixelarray.shape, mat)
    img = Image(pixelarray, coordmap)

The ``voxel2mm`` function allows separation of the image *array* from the size of the array, e.g.::

    coordmap = voxel2mm((40,256,256), mat)

We could have another way of constructing an image which allows passing of *mat* directly::

    img = Image(pixelarray, mat=mat)

or::

    img = Image.from_data_and_mat(pixelarray, mat)

but there should be "only one (obvious) way to do it".

Composing transforms
''''''''''''''''''''

I have two images, *img1* and *img2*.  Each image has a voxel-to-world transform associated with it.  (The "world" for these two transforms could be similar or even identical in the case of an fmri series.)  I would like to get from voxel coordinates in *img1* to voxel coordinates in *img2*, for resampling::

    imgA = load_image(infile_A)
    vx2mmA = imgA.coordmap
    imgB = load_image(infile_B)
    vx2mmB = imgB.coordmap
    mm2vxB = vx2mmB.inverse
    # I want to first apply transform implied in
    # cmA, then the inverse of transform implied in
    # cmB.  If these are matrices then this would be
    # np.dot(mm2vxB, vx2mmA)
    voxA_to_voxB = mm2vxB.composewith(vx2mmA)

The (matrix) multiply version of this syntax would be::

    voxA_to_voxB = mm2vxB * vx2mmA

Composition should be of form ``Second.composewith(First)`` - as in ``voxA_to_voxB = mm2vxB.composewith(vx2mmA)`` above.  The alternative is ``First.composewith(Second)``, as in ``voxA_to_voxB = vx2mmA.composewith(mm2vxB)``.  We choose ``Second.composewith(First)`` on the basis that people need to understand the mathematics of function composition to some degree - see wikipedia_function_composition_.

.. _wikipedia_function_composition: http://en.wikipedia.org/wiki/Function_composition
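For affine mappings, this composition is just matrix multiplication in the same order.  A sketch in bare numpy (the affines here are made up for illustration)::

    import numpy as np

    vx2mmA = np.diag([2., 3., 4., 1.])   # made-up voxel -> mm affine for imgA
    vx2mmB = np.eye(4)
    vx2mmB[:3, 3] = [-90., -126., -72.]  # made-up voxel -> mm affine for imgB
    mm2vxB = np.linalg.inv(vx2mmB)

    # Second.composewith(First) == np.dot(Second, First)
    voxA_to_voxB = np.dot(mm2vxB, vx2mmA)

    pt = np.array([10., 20., 30., 1.])   # homogeneous voxel coordinate
    assert np.allclose(np.dot(voxA_to_voxB, pt),
                       np.dot(mm2vxB, np.dot(vx2mmA, pt)))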
Real world to real world transform
''''''''''''''''''''''''''''''''''

We remind each other that a mapping is a function (callable) that takes coordinates as input and returns coordinates as output.  So, if *M* is a mapping then::

    [i',j',k'] = M(i, j, k)

where the *i, j, k* tuple is a coordinate, and the *i', j', k'* tuple is a transformed coordinate.

Let us imagine we have somehow come by a mapping *T* that relates a coordinate in a world space (mm) to other coordinates in a world space.  A registration may return such a real-world to real-world mapping.  Let us say that *V* is a useful mapping matching the voxel coordinates in *img1* to voxel coordinates in *img2*.

If *img1* has a voxel to mm mapping *M1* and *img2* has a mm to voxel mapping of *inv_M2*, as in the previous example (repeated here)::

    imgA = load_image(infile_A)
    vx2mmA = imgA.coordmap
    imgB = load_image(infile_B)
    vx2mmB = imgB.coordmap
    mm2vxB = vx2mmB.inverse

then the registration may return some coordinate map *T* such that the intended mapping *V* from voxels in *img1* to voxels in *img2* is::

    mm2vxB_map = mm2vxB.mapping
    vx2mmA_map = vx2mmA.mapping
    V = mm2vxB_map.composewith(T.composewith(vx2mmA_map))

To support this, there should be a CoordinateMap constructor that looks like this::

    T_coordmap = mm2mm(T)

where *T* is a mapping, so that::

    V_coordmap = mm2vxB.composewith(T_coordmap.composewith(vx2mmA))

I have done a coregistration between two images, *img1* and *img2*.  This has given me a voxel-to-voxel transformation and I want to store this transformation in such a way that I can use this transform to resample *img1* to *img2*.  :ref:`resampling`

I have done a coregistration between two images, *img1* and *img2*.  I may want this to give me a worldA-to-worldB transformation, where worldA is the world of voxel-to-world for *img1*, and worldB is the world of voxel-to-world of *img2*.

My *img1* has a voxel to world transformation.  This transformation may (for example) have come from the scanner that acquired the image - so telling me how the voxel positions in *img1* correspond to physical coordinates in terms of the magnet isocenter and millimeters in terms of the primary gradient orientations (x, y and z).  I have the same for *img2*.  For example, I might choose to display this image resampled so each voxel is a 1mm cube.

Now I have these transformations: ST(*img1*-V2W), and ST(*img2*-V2W) (where ST is *scanner transform* as above, and *V2W* is voxel to world).

I have now done a coregistration between *img1* and *img2* (somehow) - giving me, in addition to *img1* and *img2*, a transformation that registers *img1* and *img2*.  Let's call this transformation V2V(*img1*, *img2*), where V2V is voxel-to-voxel.

In actuality *img2* can be an array of images, such as a series of fMRI images, and I want to align all the *img2* series to *img1* and then take these voxel-to-voxel aligned images (the *img1* and *img2* array) and remap them to the world space (voxel-to-world).

Since remapping is an interpolation operation I can generate errors in the resampled pixel values.  If I do more than one resampling, error will accumulate.  I want to do only a single resampling.  To avoid the errors associated with resampling I will build a *composite transformation* that will chain the separate voxel-to-voxel and voxel-to-world transformations into a single transformation function (such as an affine matrix that is the result of multiplying the several affine matrices together).  With this single *composite transformation* I now resample *img1* and *img2* and put them into the world coordinate system from which I can make measurements.
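A sketch of that single-resampling idea with tools that exist today (``scipy.ndimage``; the composite affine *T* and the shapes are made up).  Note that ``affine_transform`` maps *output* coordinates to *input* coordinates, which is the direction needed here::

    import numpy as np
    from scipy.ndimage import affine_transform

    img1_data = np.random.normal(size=(40, 50, 60))
    img2_shape = (32, 32, 24)
    # Made-up composite voxel(img2) -> voxel(img1) affine - in practice the
    # product of the chained voxel-to-world, registration and
    # world-to-voxel matrices, so that only one interpolation happens.
    T = np.eye(4)
    T[:3, 3] = [2.5, 0., -1.]

    resampled = affine_transform(img1_data, T[:3, :3], offset=T[:3, 3],
                                 output_shape=img2_shape, order=3)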
nipy-0.6.1/doc/devel/development_quickstart.rst000066400000000000000000000052321470056100100216630ustar00rootroot00000000000000.. _development-quickstart:

========================
 Development quickstart
========================

Source Code
===========

NIPY uses github_ for our code hosting.  For immediate access to the source code, see the `nipy github`_ site.

Checking out the latest version
===============================

To check out the latest version of nipy you need git_::

    git clone git://github.com/nipy/nipy.git

There are two methods to install a development version of nipy.  For both methods, build the extensions in place::

    python setup.py build_ext --inplace

Then you can either:

#. Create a symbolic link in your *site-packages* directory to the inplace build of your source.  The advantage of this method is it does not require any modifications of your PYTHONPATH.

#. Place the source directory in your PYTHONPATH.

With either method, all of the modifications made to your source tree will be picked up when nipy is imported.

Getting data files
==================

See :ref:`data_files`.

Guidelines
==========

We have adopted many developer guidelines in an effort to make development easy, and the source code readable, consistent and robust.  Many of our guidelines are adopted from the scipy_ / numpy_ community.  We welcome new developers to the effort, if you're interested in developing code or documentation please join the `nipy mailing list`_ and introduce yourself.  If you plan to do any code development, we ask that you take a look at the following guidelines.  We do our best to follow these guidelines ourselves:

* :ref:`howto_document` : Documentation is critical.  This document describes the documentation style, syntax, and tools we use.

* `Numpy/Scipy Coding Style Guidelines: `_ This is the coding style we strive to maintain.

* :ref:`development-workflow` : This describes our process for version control.

* :ref:`testing` : We've adopted a rigorous testing framework.

* :ref:`optimization`: "premature optimization is the root of all evil."

.. _trunk_download:

Submitting a patch
==================

The preferred method to submit a patch is to create a branch of nipy on your machine, modify the code and make a patch or patches.  Then email the `nipy mailing list`_ and we will review your code and hopefully apply (merge) your patch.  See the instructions for :ref:`making-patches`.

If you do not wish to use git and github, please feel free to file a bug report and submit a patch or email the `nipy mailing list`_.

Bug reports
===========

If you find a bug in nipy, please submit a bug report at the `nipy bugs`_ github site so that we can fix it.

.. include:: ../links_names.txt
nipy-0.6.1/doc/devel/guidelines/000077500000000000000000000000001470056100100164635ustar00rootroot00000000000000nipy-0.6.1/doc/devel/guidelines/build_debug.rst000066400000000000000000000027611470056100100214700ustar00rootroot00000000000000###################
Debugging the build
###################

We use the `Meson `_ build system, which you will generally use via the `meson-python `_ frontend.

Meson-Python is the wrapper that causes a `pip` command to further call Meson to build Nipy files ready for import.

This can be a problem when you call a command like `pip install .` in the Nipy root directory, and get an obscure error message.  It can be difficult to work out where the build failed.

***********************
Debug for build failure
***********************

To debug builds, drop out of the Meson-Python frontend by invoking Meson directly.

First make sure you have Meson installed, along with its build backend `Ninja `_::

    pip install meson ninja

You may also need Cython>=3::

    pip install "cython>=3"

From the Nipy repository root directory (containing the `pyproject.toml` file)::

    meson setup build

This will configure the Meson build in a new subdirectory ``build``.  Then::

    cd build
    ninja -j1

This will set off the build with a single thread (`-j1`).  Prefer a single thread so you get a sequential build.  This means that you will see each step running in turn, and you will get any error message at the end of the output.  Conversely, if you run with multiple threads (the default), then you'll see warnings and similar from multiple threads, and it will be more difficult to spot the error message among the other outputs.
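If the error is still hard to spot, it can also help to make Ninja echo each full command as it runs (a sketch - standard Ninja options, run from the ``build`` directory as above)::

    ninja -j1 -v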
nipy-0.6.1/doc/devel/guidelines/changelog.rst000066400000000000000000000032731470056100100211510ustar00rootroot00000000000000.. _changelog:

===============
 The ChangeLog
===============

**NOTE:** We have not kept up with our ChangeLog.  This is here for future reference.  We will be more diligent with this when we have regular software releases.

If you are a developer with commit access, **please** fill in a proper ChangeLog entry per significant change.  The SVN commit messages may be shorter (though a brief summary is appreciated), but a detailed ChangeLog is critical.  It gives us a history of what has happened, allows us to write release notes at each new release, and is often the only way to backtrack on the rationale for a change (as the diff will only show the change, not **why** it happened).

Please skim the existing ChangeLog for an idea of the proper level of detail (you don't have to write a novel about a patch).

The existing ChangeLog is generated using (X)Emacs' fantastic ChangeLog mode: all you have to do is position the cursor in the function/method where the change was made, and hit 'C-x 4 a'.  XEmacs automatically opens the ChangeLog file, marks a dated/named point, and creates an entry pre-titled with the file and function name.  It doesn't get any better than this.

If you are not using (X)Emacs, please try to follow the same convention so we have a readable, organized ChangeLog.

To get your name in the ChangeLog, set this in your .emacs file:

(setq user-full-name "Your Name")
(setq user-mail-address "youraddress@domain.com")

Feel free to obfuscate or omit the address, but at least leave your name in.  For user contributions, try to give credit by name on patches or significant ideas, but please do an @ -> -AT- replacement in the email addresses (users have asked for this in the past).
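For reference, a sketch of the kind of entry this workflow produces (the date, name, file and function here are all made up)::

    2009-03-15  Jane Developer  <jdev-AT-example.com>

            * nipy/core/image/image.py (Image.__init__): check that the
            coordinate map matches the shape of the data array, raising
            ValueError otherwise.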
nipy-0.6.1/doc/devel/guidelines/commit_codes.rst000066400000000000000000000033461470056100100216700ustar00rootroot00000000000000.. _commit-codes:

Commit message codes
---------------------

Please prefix all commit summaries with one (or more) of the following labels.  This should help others to easily classify the commits into meaningful categories:

* *BF* : bug fix
* *RF* : refactoring
* *ENH* : new feature or extended functionality
* *BW* : addresses backward-compatibility
* *OPT* : optimization
* *BK* : breaks something and/or tests fail
* *DOC*: for all kinds of documentation related commits
* *TEST* : for adding or changing tests
* *STY* : PEP8 conformance, whitespace changes etc that do not affect function.
* *WIP* : Work in progress; please try and avoid using this one, and rebase incomplete changes into functional units using e.g. ``git rebase -i``

So your commit message might look something like this::

    TEST: relax test threshold slightly

    Attempted fix for failure on windows test run when arrays are in
    fact very close (within 6 dp).

Keeping up a habit of doing this is useful because it makes it much easier to see at a glance which changes are likely to be important when you are looking for sources of bugs, fixes, large refactorings or new features.

Pull request codes
------------------

When you submit a pull request to github, github will ask you for a summary.  If your code is not ready to merge, but you want to get feedback, please consider using ``WIP - me working on image design`` or similar for the title of your pull request.  That way we will all know that it's not yet ready to merge and that you may be interested in more fundamental comments about design.

When you think the pull request is ready to merge, change the title (using the *Edit* button) to something like ``MRG - my work on image design``.
nipy-0.6.1/doc/devel/guidelines/compiling_windows.rst000066400000000000000000000012301470056100100227460ustar00rootroot00000000000000Some notes on compiling on windows with Visual Studio
-----------------------------------------------------

I followed instructions here:

http://wiki.cython.org/64BitCythonExtensionsOnWindows

First I downloaded and installed from here:

http://download.microsoft.com/download/2/E/9/2E911956-F90F-4BFB-8231-E292A7B6F287/GRMSDKX_EN_DVD.iso

via here:

http://www.microsoft.com/en-us/download/details.aspx?id=18950#instructions

Then I got Visual Studio 2008 from here:

http://www.microsoft.com/en-us/download/details.aspx?id=14597

(file ``vcsetup.exe``) with hints from here:

http://docs.python.org/devguide/setup.html#windows
http://bugs.python.org/issue16161
nipy-0.6.1/doc/devel/guidelines/coverage_testing.rst000066400000000000000000000022601470056100100225450ustar00rootroot00000000000000
Coverage Testing
----------------

Coverage testing is a technique used to see how much of the code is exercised by the unit tests.  It is important to remember that a high level of coverage is a necessary but not sufficient condition for having effective tests.  Coverage testing can be useful for identifying whole functions or classes which are not tested, or for finding certain conditions which are never tested.

This is an excellent task for pytest_ - the automated test runner we are using.  Pytest can run the `python coverage tester`_.  First make sure you have the coverage test plugin installed on your system::

    pip install pytest-cov

Run Pytest with coverage testing arguments::

    pytest --cov=nipy --doctest-plus nipy

The coverage report will cover any python source module imported after the start of the test.  This can be noisy and difficult to focus on the specific module for which you are writing tests.  For instance, the default report also includes coverage of most of ``numpy``.  To focus the coverage report, you can provide Pytest with the specific package you would like output from using the ``--cov=nipy`` (the option above).
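For example, to restrict the report to a single subpackage (a sketch, assuming the ``pytest-cov`` plugin as above; substitute the subpackage you are actually testing)::

    pytest --cov=nipy.algorithms --doctest-plus --pyargs nipy.algorithms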
.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/guidelines/debugging.rst000066400000000000000000000023751470056100100211570ustar00rootroot00000000000000===========
 Debugging
===========

Some options are:

Run in ipython
--------------

As in::

    In [1]: run mymodule.py
    ... (somecrash)
    In [2]: %debug

Then diagnose, using the workspace that comes up, which has the context of the crash.

You can also do::

    In [1]: %pdb on
    In [2]: run mymodule.py
    ... (somecrash)

At that point you will be automatically dropped into the workspace in the context of the error.  This is very similar to the matlab ``dbstop if error`` command.

See the `ipython manual`_, and `debugging in ipython `_ for more detail.

Embed ipython in crashing code
------------------------------

Often it is not possible to run the code directly from ipython using the ``run`` command.  For example, the code may be called from some other system such as sphinx_.  In that case you can embed.  At the point that you want ipython to open with the context available for introspection, add::

    from IPython.Shell import IPShellEmbed
    ipshell = IPShellEmbed()
    ipshell()

See `embedding ipython `_ for more detail.
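Note that ``IPython.Shell`` above dates from very old IPython versions; with any recent IPython the equivalent embedding call is (a sketch)::

    import IPython

    # ... at the point of interest:
    IPython.embed()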
.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/guidelines/elegant.py000066400000000000000000000003161470056100100204540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import matplotlib.pyplot as plt

plt.plot([1,2,3], [4,5,6])
plt.ylabel('some more numbers')
nipy-0.6.1/doc/devel/guidelines/gitwash/000077500000000000000000000000001470056100100201315ustar00rootroot00000000000000nipy-0.6.1/doc/devel/guidelines/gitwash/branch_dropdown.png000066400000000000000000000376671470056100100240300ustar00rootroot00000000000000[binary PNG image data omitted]
In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option::

   git push --set-upstream origin my-new-feature

From now on git will know that ``my-new-feature`` is related to the ``my-new-feature`` branch in the github repo.

.. _edit-flow:

The editing workflow
====================

Overview
--------

::

   # hack hack
   git add my_new_file
   git commit -am 'NF - some message'
   git push

In more detail
--------------

#. Make some changes

#. See which files have changed with ``git status`` (see `git status`_).  You'll see a listing like this one:

   .. code-block:: none

      # On branch my-new-feature
      # Changed but not updated:
      #   (use "git add <file>..." to update what will be committed)
      #   (use "git checkout -- <file>..." to discard changes in working directory)
      #
      #   modified:   README
      #
      # Untracked files:
      #   (use "git add <file>..." to include in what will be committed)
      #
      #   INSTALL
      no changes added to commit (use "git add" and/or "git commit -a")

#. Check what the actual changes are with ``git diff`` (`git diff`_).

#. Add any new files to version control ``git add new_file_name`` (see `git add`_).

#. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``.  Note the ``-am`` options to ``commit``.  The ``m`` flag just signals that you're going to type a message on the command line.  The ``a`` flag |emdash| you can just take on faith |emdash| or see `why the -a flag?`_ |emdash| and the helpful use-case description in the `tangled working copy problem`_.  The `git commit`_ manual page might also be useful.

#. To push the changes up to your forked repo on github, do a ``git push`` (see `git push`_).

Ask for your changes to be reviewed or merged
=============================================

When you are ready to ask for someone to review your code and consider a merge:

#. Go to the URL of your forked repo, say ``https://github.com/your-user-name/nipy``.

#. Use the 'Switch Branches' dropdown menu near the top left of the page to select the branch with your changes:

   .. image:: branch_dropdown.png

#. Click on the 'Pull request' button:

   .. image:: pull_button.png

   Enter a title for the set of changes, and some explanation of what you've done.  Say if there is anything you'd like particular attention for - like a complicated change or some code you are not happy with.

   If you don't think your request is ready to be merged, just say so in your pull request message.  This is still a good way of getting some preliminary code review.

Some other things you might want to do
======================================

Delete a branch on github
-------------------------

::

   git checkout main
   # delete branch locally
   git branch -D my-unwanted-branch
   # delete branch on github
   git push origin :my-unwanted-branch

Note the colon ``:`` before ``my-unwanted-branch``.  See also: https://help.github.com/articles/pushing-to-a-remote/#deleting-a-remote-branch-or-tag

Several people sharing a single repository
------------------------------------------

If you want to work on some stuff with other people, where you are all committing into the same repository, or even the same branch, then just share it via github.

First fork nipy into your account, as from :ref:`forking`.

Then, go to your forked repository github page, say ``https://github.com/your-user-name/nipy``

Click on the 'Admin' button, and add anyone else to the repo as a collaborator:
.. image:: pull_button.png

Now all those people can do::

    git clone git@github.com:your-user-name/nipy.git

Remember that links starting with ``git@`` use the ssh protocol and are read-write; links starting with ``git://`` are read-only.

Your collaborators can then commit directly into that repo with the usual::

     git commit -am 'ENH - much better code'
     git push origin main # pushes directly into your repo

Explore your repository
-----------------------

To see a graphical representation of the repository branches and commits::

   gitk --all

To see a linear list of commits for this branch::

   git log

You can also look at the `network graph visualizer`_ for your github repo.

Finally the :ref:`fancy-log` ``lg`` alias will give you a reasonable text-based graph of the repository.

.. _rebase-on-trunk:

Rebasing on trunk
-----------------

Let's say you thought of some work you'd like to do.  You :ref:`update-mirror-trunk` and :ref:`make-feature-branch` called ``cool-feature``.  At this stage trunk is at some commit, let's call it E.  Now you make some new commits on your ``cool-feature`` branch, let's call them A, B, C.  Maybe your changes take a while, or you come back to them after a while.  In the meantime, trunk has progressed from commit E to commit (say) G:

.. code-block:: none

          A---B---C cool-feature
         /
    D---E---F---G trunk

At this stage you consider merging trunk into your feature branch, and you remember that this here page sternly advises you not to do that, because the history will get messy.  Most of the time you can just ask for a review, and not worry that trunk has got a little ahead.  But sometimes, the changes in trunk might affect your changes, and you need to harmonize them.  In this situation you may prefer to do a rebase.

rebase takes your changes (A, B, C) and replays them as if they had been made to the current state of ``trunk``.  In other words, in this case, it takes the changes represented by A, B, C and replays them on top of G.  After the rebase, your history will look like this:

.. code-block:: none

                  A'--B'--C' cool-feature
                 /
    D---E---F---G trunk

See `rebase without tears`_ for more detail.

To do a rebase on trunk::

    # Update the mirror of trunk
    git fetch upstream
    # go to the feature branch
    git checkout cool-feature
    # make a backup in case you mess up
    git branch tmp cool-feature
    # rebase cool-feature onto trunk
    git rebase --onto upstream/main upstream/main cool-feature

In this situation, where you are already on branch ``cool-feature``, the last command can be written more succinctly as::

    git rebase upstream/main

When all looks good you can delete your backup branch::

    git branch -D tmp

If it doesn't look good you may need to have a look at :ref:`recovering-from-mess-up`.

If you have made changes to files that have also changed in trunk, this may generate merge conflicts that you need to resolve - see the `git rebase`_ man page for some instructions at the end of the "Description" section.  There is some related help on merging in the git user manual - see `resolving a merge`_.

.. _recovering-from-mess-up:

Recovering from mess-ups
------------------------

Sometimes, you mess up merges or rebases.  Luckily, in git it is relatively straightforward to recover from such mistakes.
If you mess up during a rebase::

   git rebase --abort

If you notice you messed up after the rebase::

   # reset branch back to the saved point
   git reset --hard tmp

If you forgot to make a backup branch::

   # look at the reflog of the branch
   git reflog show cool-feature

   8630830 cool-feature@{0}: commit: BUG: io: close file handles immediately
   278dd2a cool-feature@{1}: rebase finished: refs/heads/my-feature-branch onto 11ee694744f2552d
   26aa21a cool-feature@{2}: commit: BUG: lib: make seek_gzip_factory not leak gzip obj
   ...

   # reset the branch to where it was before the botched rebase
   git reset --hard cool-feature@{2}

.. _rewriting-commit-history:

Rewriting commit history
------------------------

.. note::

   Do this only for your own feature branches.

There's an embarrassing typo in a commit you made?  Or perhaps you made several false starts you would like posterity not to see.

This can be done via *interactive rebasing*.

Suppose that the commit history looks like this::

    git log --oneline
    eadc391 Fix some remaining bugs
    a815645 Modify it so that it works
    2dec1ac Fix a few bugs + disable
    13d7934 First implementation
    6ad92e5 * masked is now an instance of a new object, MaskedConstant
    29001ed Add pre-nep for a copule of structured_array_extensions.
    ...

and ``6ad92e5`` is the last commit in the ``cool-feature`` branch.  Suppose we want to make the following changes:

* Rewrite the commit message for ``13d7934`` to something more sensible.
* Combine the commits ``2dec1ac``, ``a815645``, ``eadc391`` into a single one.

We do as follows::

    # make a backup of the current state
    git branch tmp HEAD
    # interactive rebase
    git rebase -i 6ad92e5

This will open an editor with the following text in it::

    pick 13d7934 First implementation
    pick 2dec1ac Fix a few bugs + disable
    pick a815645 Modify it so that it works
    pick eadc391 Fix some remaining bugs

    # Rebase 6ad92e5..eadc391 onto 6ad92e5
    #
    # Commands:
    #  p, pick = use commit
    #  r, reword = use commit, but edit the commit message
    #  e, edit = use commit, but stop for amending
    #  s, squash = use commit, but meld into previous commit
    #  f, fixup = like "squash", but discard this commit's log message
    #
    # If you remove a line here THAT COMMIT WILL BE LOST.
    # However, if you remove everything, the rebase will be aborted.
    #

To achieve what we want, we will make the following changes to it::

    r 13d7934 First implementation
    pick 2dec1ac Fix a few bugs + disable
    f a815645 Modify it so that it works
    f eadc391 Fix some remaining bugs

This means that (i) we want to edit the commit message for ``13d7934``, and (ii) collapse the last three commits into one.  Now we save and quit the editor.

Git will then immediately bring up an editor for editing the commit message.  After revising it, we get the output::

    [detached HEAD 721fc64] FOO: First implementation
     2 files changed, 199 insertions(+), 66 deletions(-)
    [detached HEAD 0f22701] Fix a few bugs + disable
     1 files changed, 79 insertions(+), 61 deletions(-)
    Successfully rebased and updated refs/heads/my-feature-branch.

and the history looks now like this::

     0f22701 Fix a few bugs + disable
     721fc64 ENH: Sophisticated feature
     6ad92e5 * masked is now an instance of a new object, MaskedConstant

If it went wrong, recovery is again possible as explained :ref:`above <recovering-from-mess-up>`.

.. include:: links.inc
nipy-0.6.1/doc/devel/guidelines/gitwash/following_latest.rst000066400000000000000000000015131470056100100242370ustar00rootroot00000000000000
.. highlight:: bash

.. _following-latest:

=============================
 Following the latest source
=============================

These are the instructions if you just want to follow the latest *nipy* source, but you don't need to do any development for now.

The steps are:

* :ref:`install-git`
* get local copy of the `nipy github`_ git repository
* update local copy from time to time

Get the local copy of the code
==============================

From the command line::

   git clone git://github.com/nipy/nipy.git

You now have a copy of the code tree in the new ``nipy`` directory.

Updating the code
=================

From time to time you may want to pull down the latest code.  Do this with::

   cd nipy
   git pull

The tree in ``nipy`` will now have the latest changes from the initial repository.

.. include:: links.inc
nipy-0.6.1/doc/devel/guidelines/gitwash/forking_button.png000066400000000000000000000314441470056100100236770ustar00rootroot00000000000000[binary PNG image data omitted]
nipy-0.6.1/doc/devel/guidelines/gitwash/forking_hell.rst000066400000000000000000000021611470056100100233260ustar00rootroot00000000000000..
highlight:: bash .. _forking: ====================================================== Making your own copy (fork) of nipy ====================================================== You need to do this only once. The instructions here are very similar to the instructions at https://help.github.com/forking/ |emdash| please see that page for more detail. We're repeating some of it here just to give the specifics for the `nipy`_ project, and to suggest some default names. Set up and configure a github account ===================================== If you don't have a github account, go to the github page, and make one. You then need to configure your account to allow write access |emdash| see the ``Generating SSH keys`` help on `github help`_. Create your own forked copy of `nipy`_ ====================================================== #. Log into your github account. #. Go to the `nipy`_ github home at `nipy github`_. #. Click on the *fork* button: .. image:: forking_button.png Now, after a short pause, you should find yourself at the home page for your own forked copy of `nipy`_. .. include:: links.inc nipy-0.6.1/doc/devel/guidelines/gitwash/git_development.rst000066400000000000000000000003401470056100100240450ustar00rootroot00000000000000.. _git-development: ===================== Git for development ===================== Contents: .. toctree:: :maxdepth: 2 forking_hell set_up_fork configure_git development_workflow maintainer_workflow nipy-0.6.1/doc/devel/guidelines/gitwash/git_install.rst000066400000000000000000000011421470056100100231720ustar00rootroot00000000000000.. highlight:: bash .. _install-git: ============= Install git ============= Overview ======== ================ ============= Debian / Ubuntu ``sudo apt-get install git`` Fedora ``sudo yum install git`` Windows Download and install msysGit_ OS X Use the git-osx-installer_ ================ ============= In detail ========= See the git page for the most recent information. Have a look at the github install help pages available from `github help`_ There are good instructions here: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git .. include:: links.inc nipy-0.6.1/doc/devel/guidelines/gitwash/git_intro.rst000066400000000000000000000010561470056100100226630ustar00rootroot00000000000000.. highlight:: bash ============== Introduction ============== These pages describe a git_ and github_ workflow for the `nipy`_ project. There are several different workflows here, for different ways of working with *nipy*. This is not a comprehensive git reference, it's just a workflow for our own project. It's tailored to the github hosting service. You may well find better or quicker ways of getting stuff done with git, but these should get you started. For general resources for learning git, see :ref:`git-resources`. .. include:: links.inc nipy-0.6.1/doc/devel/guidelines/gitwash/git_links.inc000066400000000000000000000062461470056100100226170ustar00rootroot00000000000000.. This (-*- rst -*-) format file contains commonly used link targets and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for nipy, NIPY, Nipy, etc... .. git stuff .. _git: https://git-scm.com/ .. _github: https://github.com .. _github help: https://help.github.com .. _msysgit: https://git-scm.com/download/win .. 
_git-osx-installer: https://git-scm.com/download/mac .. _subversion: http://subversion.tigris.org/ .. _git cheat sheet: https://help.github.com/git-cheat-sheets/ .. _pro git book: https://progit.org/ .. _git svn crash course: https://git-scm.com/course/svn.html .. _network graph visualizer: https://github.com/blog/39-say-hello-to-the-network-graph-visualizer .. _git user manual: https://schacon.github.io/git/user-manual.html .. _git tutorial: https://schacon.github.io/git/gittutorial.html .. _git community book: https://git-scm.com/book/en/v2 .. _git ready: http://gitready.com/ .. _Fernando's git page: http://www.fperez.org/py4science/git.html .. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html .. _git concepts: https://www.sbf5.com/~cduan/technical/git/ .. _git clone: https://schacon.github.io/git/git-clone.html .. _git checkout: https://schacon.github.io/git/git-checkout.html .. _git commit: https://schacon.github.io/git/git-commit.html .. _git push: https://schacon.github.io/git/git-push.html .. _git pull: https://schacon.github.io/git/git-pull.html .. _git add: https://schacon.github.io/git/git-add.html .. _git status: https://schacon.github.io/git/git-status.html .. _git diff: https://schacon.github.io/git/git-diff.html .. _git log: https://schacon.github.io/git/git-log.html .. _git branch: https://schacon.github.io/git/git-branch.html .. _git remote: https://schacon.github.io/git/git-remote.html .. _git rebase: https://schacon.github.io/git/git-rebase.html .. _git config: https://schacon.github.io/git/git-config.html .. _why the -a flag?: http://gitready.com/beginner/2009/01/18/the-staging-area.html .. _git staging area: http://gitready.com/beginner/2009/01/18/the-staging-area.html .. _tangled working copy problem: http://2ndscale.com/rtomayko/2008/the-thing-about-git .. _git management: https://web.archive.org/web/20090224195437/http://kerneltrap.org/Linux/Git_Management .. _linux git workflow: https://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html .. _git foundation: https://matthew-brett.github.io/pydagogue/foundation.html .. _deleting master on github: https://matthew-brett.github.io/pydagogue/gh_delete_master.html .. _rebase without tears: https://matthew-brett.github.io/pydagogue/rebase_without_tears.html .. _resolving a merge: https://schacon.github.io/git/user-manual.html#resolving-a-merge .. _ipython git workflow: https://mail.scipy.org/pipermail/ipython-dev/2010-October/006746.html .. other stuff .. _python: https://www.python.org .. |emdash| unicode:: U+02014 .. vim: ft=rst nipy-0.6.1/doc/devel/guidelines/gitwash/git_resources.rst000066400000000000000000000032631470056100100235440ustar00rootroot00000000000000.. highlight:: bash .. _git-resources: ============= git resources ============= Tutorials and summaries ======================= * `github help`_ has an excellent series of how-to guides. * The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. * The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ |emdash| a nice series of tutorials * `git magic`_ |emdash| extended introduction with intermediate detail * The `git parable`_ is an easy read explaining the concepts behind git. * `git foundation`_ expands on the `git parable`_. 
* Fernando Perez' git page |emdash| `Fernando's git page`_ |emdash| many links and tips * A good but technical page on `git concepts`_ * `git svn crash course`_: git for those of us used to subversion_ Advanced git workflow ===================== There are many ways of working with git; here are some posts on the rules of thumb that other projects have come up with: * Linus Torvalds on `git management`_ * Linus Torvalds on `linux git workflow`_ . Summary; use the git tools to make the history of your edits as clean as possible; merge from upstream edits as little as possible in branches where you are doing active development. Manual pages online =================== You can get these on your own machine with (e.g) ``git help push`` or (same thing) ``git push --help``, but, for convenience, here are the online manual pages for some common commands: * `git add`_ * `git branch`_ * `git checkout`_ * `git clone`_ * `git commit`_ * `git config`_ * `git diff`_ * `git log`_ * `git pull`_ * `git push`_ * `git remote`_ * `git status`_ .. include:: links.inc nipy-0.6.1/doc/devel/guidelines/gitwash/index.rst000066400000000000000000000003531470056100100217730ustar00rootroot00000000000000.. _using-git: Working with *nipy* source code ================================================ Contents: .. toctree:: :maxdepth: 2 git_intro git_install following_latest patching git_development git_resources nipy-0.6.1/doc/devel/guidelines/gitwash/known_projects.inc000066400000000000000000000027101470056100100236710ustar00rootroot00000000000000.. Known projects .. PROJECTNAME placeholders .. _PROJECTNAME: http://nipy.org .. _`PROJECTNAME github`: https://github.com/nipy .. _`PROJECTNAME mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging .. numpy .. _numpy: http://www.numpy.org .. _`numpy github`: https://github.com/numpy/numpy .. _`numpy mailing list`: https://mail.scipy.org/mailman/listinfo/numpy-discussion .. scipy .. _scipy: https://www.scipy.org .. _`scipy github`: https://github.com/scipy/scipy .. _`scipy mailing list`: https://mail.scipy.org/mailman/listinfo/scipy-dev .. nipy .. _nipy: http://nipy.org/nipy/ .. _`nipy github`: https://github.com/nipy/nipy .. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging .. ipython .. _ipython: https://ipython.org .. _`ipython github`: https://github.com/ipython/ipython .. _`ipython mailing list`: https://mail.scipy.org/mailman/listinfo/IPython-dev .. dipy .. _dipy: http://nipy.org/dipy/ .. _`dipy github`: https://github.com/Garyfallidis/dipy .. _`dipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging .. nibabel .. _nibabel: http://nipy.org/nibabel/ .. _`nibabel github`: https://github.com/nipy/nibabel .. _`nibabel mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging .. marsbar .. _marsbar: http://marsbar.sourceforge.net .. _`marsbar github`: https://github.com/matthew-brett/marsbar .. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users nipy-0.6.1/doc/devel/guidelines/gitwash/links.inc000066400000000000000000000001611470056100100217420ustar00rootroot00000000000000.. compiling links file .. include:: known_projects.inc .. include:: this_project.inc .. include:: git_links.inc nipy-0.6.1/doc/devel/guidelines/gitwash/maintainer_workflow.rst000066400000000000000000000060071470056100100247470ustar00rootroot00000000000000.. highlight:: bash .. 
_maintainer-workflow:

###################
Maintainer workflow
###################

This page is for maintainers |emdash| those of us who merge our own or other
people's changes into the upstream repository.

Being as how you're a maintainer, you are completely on top of the basic stuff
in :ref:`development-workflow`.

The instructions in :ref:`linking-to-upstream` add a remote that has read-only
access to the upstream repo.  Being a maintainer, you've got read-write
access.

It's good to have your upstream remote have a scary name, to remind you that
it's a read-write remote::

    git remote add upstream-rw git@github.com:nipy/nipy.git
    git fetch upstream-rw

*******************
Integrating changes
*******************

Let's say you have some changes that need to go into trunk
(``upstream-rw/main``).

The changes are in some branch that you are currently on.  For example, you
are looking at someone's changes like this::

    git remote add someone git://github.com/someone/nipy.git
    git fetch someone
    git branch cool-feature --track someone/cool-feature
    git checkout cool-feature

So now you are on the branch with the changes to be incorporated upstream.
The rest of this section assumes you are on this branch.

A few commits
=============

If there are only a few commits, consider rebasing to upstream::

    # Fetch upstream changes
    git fetch upstream-rw
    # rebase
    git rebase upstream-rw/main

Remember that, if you do a rebase, and push that, you'll have to close any
github pull requests manually, because github will not be able to detect that
the changes have already been merged.

A long series of commits
========================

If there is a longer series of related commits, consider a merge instead::

    git fetch upstream-rw
    git merge --no-ff upstream-rw/main

The merge will be detected by github, and should close any related pull
requests automatically.

Note the ``--no-ff`` above.  This forces git to make a merge commit, rather
than doing a fast-forward, so that this set of commits branches off trunk and
then rejoins the main history with a merge, rather than appearing to have been
made directly on top of trunk.

Check the history
=================

Now, in either case, you should check that the history is sensible and you
have the right commits::

    git log --oneline --graph
    git log -p upstream-rw/main..

The first line above just shows the history in a compact way, with a text
representation of the history graph.  The second line shows the log of commits
excluding those that can be reached from trunk (``upstream-rw/main``), and
including those that can be reached from current HEAD (implied with the ``..``
at the end).  So, it shows the commits unique to this branch compared to
trunk.  The ``-p`` option shows the diff for these commits in patch form.

Push to trunk
=============

::

    git push upstream-rw my-new-feature:main

This pushes the ``my-new-feature`` branch in this repository to the ``main``
branch in the ``upstream-rw`` repository.

.. include:: links.inc
nipy-0.6.1/doc/devel/guidelines/gitwash/patching.rst000066400000000000000000000077021470056100100224660ustar00rootroot00000000000000
.. highlight:: bash

================
Making a patch
================

You've discovered a bug or something else you want to change in `nipy`_
|emdash| excellent!

You've worked out a way to fix it |emdash| even better!

You want to tell us about it |emdash| best of all!

The easiest way is to make a *patch* or set of patches.  Here we explain how.
Making a patch is the simplest and quickest, but if you're going to be doing anything more than simple quick things, please consider following the :ref:`git-development` model instead. .. _making-patches: Making patches ============== Overview -------- :: # tell git who you are git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" # get the repository if you don't have it git clone git://github.com/nipy/nipy.git # make a branch for your patching cd nipy git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' # make the patch files git format-patch -M -C main Then, send the generated patch files to the `nipy mailing list`_ |emdash| where we will thank you warmly. In detail --------- #. Tell git who you are so it can label the commits you've made:: git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" #. If you don't already have one, clone a copy of the `nipy`_ repository:: git clone git://github.com/nipy/nipy.git cd nipy #. Make a 'feature branch'. This will be where you work on your bug fix. It's nice and safe and leaves you with access to an unmodified copy of the code in the main branch:: git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of #. Do some edits, and commit them as you go:: # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag |emdash| you can just take on faith |emdash| or see `why the -a flag?`_. #. When you have finished, check you have committed all your changes:: git status #. Finally, make your commits into patches. You want all the commits since you branched from the ``main`` branch:: git format-patch -M -C main You will now have several files named for the commits: .. code-block:: none 0001-BF-added-tests-for-Funny-bug.patch 0002-BF-added-fix-for-Funny-bug.patch Send these files to the `nipy mailing list`_. When you are done, to switch back to the main copy of the code, just return to the ``main`` branch:: git checkout main Moving from patching to development =================================== If you find you have done some patches, and you have one or more feature branches, you will probably want to switch to development mode. You can do this with the repository you have. Fork the `nipy`_ repository on github |emdash| :ref:`forking`. Then:: # checkout and refresh main branch from main repo git checkout main git pull origin main # rename pointer to main repository to 'upstream' git remote rename origin upstream # point your repo to default read / write to your fork on github git remote add origin git@github.com:your-user-name/nipy.git # push up any branches you've made and want to keep git push origin the-fix-im-thinking-of Then you can, if you want, follow the :ref:`development-workflow`. .. 
include:: links.inc
nipy-0.6.1/doc/devel/guidelines/gitwash/pull_button.png000066400000000000000000000311351470056100100232110ustar00rootroot00000000000000
[binary data of the pull-button screenshot PNG omitted]
nipy-0.6.1/doc/devel/guidelines/gitwash/set_up_fork.rst000066400000000000000000000037351470056100100232110ustar00rootroot00000000000000
.. highlight:: bash

.. _set-up-fork:

==================
Set up your fork
==================

First you follow the instructions for :ref:`forking`.

Overview
========

::

    git clone git@github.com:your-user-name/nipy.git
    cd nipy
    git remote add upstream git://github.com/nipy/nipy.git

In detail
=========

Clone your fork
---------------

#. Clone your fork to the local computer with
   ``git clone git@github.com:your-user-name/nipy.git``

#. Investigate.  Change directory to your new repo: ``cd nipy``.  Then
   ``git branch -a`` to show you all branches.  You'll get something like:

   .. code-block:: none

      * main
      remotes/origin/main

   This tells you that you are currently on the ``main`` branch, and that
   you also have a ``remote`` connection to ``origin/main``.

   What remote repository is ``remote/origin``?  Try ``git remote -v`` to
   see the URLs for the remote.  They will point to your github fork.

   Now you want to connect to the upstream `nipy github`_ repository, so you
   can merge in changes from trunk.

.. _linking-to-upstream:

Linking your repository to the upstream repo
--------------------------------------------

::

    cd nipy
    git remote add upstream git://github.com/nipy/nipy.git

``upstream`` here is just the arbitrary name we're using to refer to the main
`nipy`_ repository at `nipy github`_.

Note that we've used ``git://`` for the URL rather than ``git@``.  The
``git://`` URL is read only.  This means that we can't accidentally (or
deliberately) write to the upstream repo, and we are only going to use it to
merge into our own code.

Just for your own satisfaction, show yourself that you now have a new
'remote', with ``git remote -v show``, giving you something like:

.. code-block:: none

   upstream git://github.com/nipy/nipy.git (fetch)
   upstream git://github.com/nipy/nipy.git (push)
   origin git@github.com:your-user-name/nipy.git (fetch)
   origin git@github.com:your-user-name/nipy.git (push)

.. include:: links.inc
nipy-0.6.1/doc/devel/guidelines/gitwash/this_project.inc000066400000000000000000000001701470056100100233170ustar00rootroot00000000000000
.. nipy
.. _`nipy`: http://nipy.org/nipy
.. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
nipy-0.6.1/doc/devel/guidelines/howto_document.rst000066400000000000000000000050611470056100100222550ustar00rootroot00000000000000
.. _howto_document:

============================
How to write documentation
============================

Nipy_ uses the Sphinx_ documentation generating tool.  Sphinx translates reST_
formatted documents into html and pdf documents.  All our documents and
docstrings are in reST format; this allows us to have both human-readable
docstrings when viewed in ipython_, and web and print quality documentation.
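To make this concrete, here is a minimal sketch of a function carrying a
numpy-style reST docstring.  The function and its parameters are invented for
illustration; they are not part of the nipy API::

    import numpy as np

    def scale_image(img, factor=2.0):
        """ Scale image data in `img` by `factor`.

        Parameters
        ----------
        img : array-like
            Array of image data.
        factor : float, optional
            Multiplicative factor for the image values.

        Returns
        -------
        scaled : ndarray
            New floating point array with scaled image values.

        Examples
        --------
        >>> scale_image(np.ones(2), 3.0)
        array([3., 3.])
        """
        return np.asarray(img, dtype=float) * factor

The same text renders as formatted HTML under Sphinx, and reads naturally at
the ipython_ prompt.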
==========================
Getting build dependencies
==========================

Building the documentation
--------------------------

You need to have Sphinx_ (version 0.6.2 or above) and graphviz_ (version 2.20
or greater).

The ``Makefile`` (in the top-level doc directory) automates the generation of
the documents.  To make the HTML documents::

    make html

For PDF documentation do::

    make pdf

The built documentation is then placed in the ``build/html`` or
``build/latex`` subdirectories.

For more options, type::

    make help

Viewing the documentation
-------------------------

We also build our website using sphinx_.  All of the documentation in the
``docs`` directory is included on the website.  There are a few files that are
website only and these are placed in the ``www`` directory.  The easiest way
to view the documentation while editing is to build the website and open the
local build in your browser::

    make web

Then open ``www/build/html/index.html`` in your browser.

Syntax
------

Please have a look at our :ref:`sphinx_helpers` for examples on using Sphinx_
and reST_ in our documentation.

The Sphinx website also has an excellent `sphinx rest`_ primer.

Additional reST references:

- `reST primer `_
- `reST quick reference `_

Consider using emacs for editing rst files - see :ref:`rst_emacs`.

Style
-----

Nipy has adopted the numpy_ documentation standards.  The `numpy coding style
guideline`_ is the main reference for how to format the documentation in your
code.  It's also useful to look at the `source reST file `_ that generates the
coding style guideline.  Numpy has a `detailed example `_ for writing
docstrings.

.. _`numpy coding style guideline`:
   http://scipy.org/scipy/numpy/wiki/CodingStyleGuidelines

Documentation Problems
----------------------

See our :ref:`documentation_faq` if you are having problems building or
writing the documentation.

.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/guidelines/index.rst000066400000000000000000000005561470056100100203320ustar00rootroot00000000000000
.. _development_guidelines:

========================
Development Guidelines
========================

.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   howto_document
   sphinx_helpers
   gitwash/index
   commit_codes
   testing
   debugging
   build_debug
   optimization
   open_source_devel
   make_release
   changelog
nipy-0.6.1/doc/devel/guidelines/make_release.rst000066400000000000000000000111131470056100100216300ustar00rootroot00000000000000
.. _release-guide:

***********************************
A guide to making a nipy release
***********************************

A guide for developers who are doing a nipy release

.. _release-checklist:

Release checklist
=================

* Review the open list of `nipy issues`_.  Check whether there are
  outstanding issues that can be closed, and whether there are any issues
  that should delay the release.  Label them!

* Review and update the release notes.  Review and update the
  :file:`Changelog` file.  Get a partial list of contributors with something
  like::

      PREV_RELEASE=0.5.0
      git log $PREV_RELEASE.. | grep '^Author' | cut -d' ' -f 2- | sort | uniq

  where ``0.5.0`` was the last release tag name.

  Then manually go over ``git shortlog $PREV_RELEASE..`` to make sure the
  release notes are as complete as possible and that every contributor was
  recognized.

* Use the opportunity to update the ``.mailmap`` file if there are any
  duplicate authors listed from ``git shortlog -ns``.

* Add any new authors to the ``AUTHOR`` file.  Add any new entries to the
  ``THANKS`` file.

* Check the copyright years in ``doc/conf.py`` and ``LICENSE``

* Check the output of::

    rst2html.py README.rst > ~/tmp/readme.html

  because this will be the output used by PyPI_

* Check the dependencies listed in ``pyproject.toml`` and in
  ``requirements.txt`` and in ``doc/users/installation.rst``.  They should
  at least match.  Do they still hold?  Make sure ``.github/workflows`` is
  testing these minimum dependencies specifically.

* Check the examples.  First download the example data by running something
  like::

      # Install data packages.
      pip install https://nipy.org/data-packages/nipy-templates-0.3.tar.gz
      pip install https://nipy.org/data-packages/nipy-data-0.3.tar.gz

  Then run the tests on the examples with::

      # Move out of the source directory.
      cd ..
      # Make log file directory.
      mkdir ~/tmp/eg_logs
      ./nipy/tools/run_log_examples.py nipy/examples --log-path=~/tmp/eg_logs

  in a virtualenv.  Review the output in (e.g.) ``~/tmp/eg_logs``.  The
  output file ``summary.txt`` will have the pass file printout that the
  ``run_log_examples.py`` script puts onto stdout while running.

* Check the documentation doctests pass::

    virtualenv venv
    venv/bin/activate
    pip install -r doc-requirements.txt
    pip install -e .
    (cd docs && make clean-doctest)

* Check the doc build::

    virtualenv venv
    venv/bin/activate
    pip install -r doc-requirements.txt
    pip install -e .
    (cd docs && make html)

* Build and test the Nipy wheels.  See the `wheel builder README `_ for
  instructions.  In summary, clone the wheel-building repo, edit the
  ``.github/workflow`` text files (if present) with the branch or commit for
  the release, commit and then push back up to github.  This will trigger a
  wheel build and test on macOS, Linux and Windows.  Check the build has
  passed on the Github interface at
  https://travis-ci.org/MacPython/nipy-wheels.  You'll need commit privileges
  to the ``nipy-wheels`` repo; ask Matthew Brett or on the mailing list if
  you do not have them.
Doing the release
=================

* The release should now be ready.

* Edit :file:`nipy/__init__.py` to set ``__version__`` to e.g. ``0.6.0``.
  Edit :file:`meson.build` to set ``version`` to match (a quick consistency
  check for these two version strings is sketched after this list).  Commit,
  then::

    make source-release

* For the wheel build / upload, follow the `wheel builder README`_
  instructions again.  Push.  Check the build has passed on the Github
  interface.  Now follow the instructions in the page above to download the
  built wheels to a local machine and upload to PyPI.

* Once everything looks good, you are ready to upload the source release to
  PyPI.  See `setuptools intro`_.  Make sure you have a file
  ``$HOME/.pypirc``, of form::

    [pypi]
    username = __token__

* Sign and upload the source release to PyPI using Twine_::

    gpg --detach-sign -a dist/nipy*.tar.gz
    twine upload dist/nipy*.tar.gz*

* Tag the release with a tag of the form ``0.6.0``.  ``-s`` below makes a
  signed tag, and ``-m`` gives the tag message::

    git tag -s -m 'Second main release' 0.6.0

* Now that the version number is OK, push the docs to github pages with::

    make upload-html

* Start the new series.  Edit ``nipy/__init__.py`` and set the version
  number to something of form::

    __version__ = "0.6.1.dev1"

  where ``0.6.0`` was the previous release.

* Push tags::

    git push --tags

* Announce to the mailing lists.
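As promised above, here is the version consistency check.  This is a
throwaway sketch of our own invention, not part of the release machinery; it
assumes the version appears as ``__version__ = "..."`` in
:file:`nipy/__init__.py` and as ``version: '...'`` in :file:`meson.build`::

    import re

    init_src = open('nipy/__init__.py').read()
    meson_src = open('meson.build').read()
    init_ver = re.search(r'__version__\s*=\s*["\']([^"\']+)', init_src).group(1)
    meson_ver = re.search(r'version\s*:\s*["\']([^"\']+)', meson_src).group(1)
    # The release is only consistent if the two files agree.
    assert init_ver == meson_ver, (init_ver, meson_ver)
    print('Version for this release:', init_ver)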
.. _twine: https://pypi.python.org/pypi/twine

.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/guidelines/open_source_devel.rst000066400000000000000000000007551470056100100227230ustar00rootroot00000000000000
.. _open_source_devel:

=========================
Open Source Development
=========================

For those interested in more info about contributing to an open source
project, here are some links I've found.  They are probably no better or
worse than other similar documents:

* `Software Release Practice HOWTO `_
* `Contributing to Open Source Projects HOWTO `_
nipy-0.6.1/doc/devel/guidelines/optimization.rst000066400000000000000000000030641470056100100217460ustar00rootroot00000000000000
.. _optimization:

==============
Optimization
==============

In the early stages of NIPY development, we are focusing on functionality and
usability.  In regards to optimization, we benefit **significantly** from the
optimized routines in scipy_ and numpy_.  As NIPY progresses it is likely we
will spend more energy on optimizing critical functions.  In our `py4science
group at UC Berkeley `_ we've had several meetings on the various
optimization options including ctypes, weave and blitz, and cython.  It's
clear there are many good options, including standard C-extensions.  However,
optimized code tends to be less readable and more difficult to debug and
maintain.  When we do optimize our code we will first profile the code to
determine the offending sections, then optimize those sections.  Until that
need arises, we will follow the great advice from these fellow programmers:

Kent Beck: "First make it work.  Then make it right.  Then make it fast."

`Donald Knuth on optimization `_: "We should forget about small efficiencies,
say about 97% of the time: premature optimization is the root of all evil."

Tim Hochberg, from the Numpy list::

    0. Think about your algorithm.
    1. Vectorize your inner loop.
    2. Eliminate temporaries
    3. Ask for help
    4. Recode in C.
    5. Accept that your code will never be fast.

    Step zero should probably be repeated after every other step ;)
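To make steps 1 and 2 of that list concrete, here is a tiny invented example;
the function names and numbers are our own, not from the nipy codebase::

    import numpy as np

    def sum_of_squares_loop(x):
        # A pure Python inner loop: slow for large arrays.
        total = 0.0
        for val in x:
            total += val * val
        return total

    def sum_of_squares_vec(x):
        # Vectorized: np.dot runs the loop in compiled code, and avoids
        # building the intermediate x * x temporary array.
        x = np.asarray(x, dtype=float)
        return float(np.dot(x, x))

    assert sum_of_squares_loop(range(4)) == sum_of_squares_vec(range(4))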
.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/guidelines/sphinx_helpers.rst000066400000000000000000000151151470056100100222530ustar00rootroot00000000000000
.. _sphinx_helpers:

====================
Sphinx Cheat Sheet
====================

Wherein I show by example how to do some things in Sphinx (you can see a
literal version of this file below in :ref:`sphinx_literal`)

.. _making_a_list:

Making a list
-------------

It is easy to make lists in reST

Bullet points
^^^^^^^^^^^^^

This is a subsection making bullet points

* point A
* point B
* point C

Enumerated points
^^^^^^^^^^^^^^^^^

This is a subsection making numbered points

#. point A
#. point B
#. point C

.. _making_a_table:

Making a table
--------------

This shows you how to make a table -- if you only want to make a list see
:ref:`making_a_list`.

================== ============
Name               Age
================== ============
John D Hunter      40
Cast of Thousands  41
And Still More     42
================== ============

.. _making_links:

Making links
------------

Cross-references sections and documents
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use reST labels to cross-reference sections and other documents.  The
mechanisms for referencing another reST document and for referencing a
subsection in any document, including the current one, are identical.  Place
a *reference label* above the section heading, like this::

    .. _sphinx_helpers:

    ====================
    Sphinx Cheat Sheet
    ====================

Note that the blank line between the *reference label* and the section
heading is important!

Then refer to the *reference label* in another document like this::

    :ref:`sphinx_helpers`

The reference is replaced with the section title when Sphinx builds the
document while maintaining the linking mechanism.  For example, the above
reference will appear as :ref:`sphinx_helpers`.  As the documentation grows
there are many references to keep track of.

For documents, please use a *reference label* that matches the file name.
For sections, please try and make the *reference label* something meaningful
and try to keep abbreviations limited.  Along these lines, we are using
*underscores* for multiple-word *reference labels* instead of hyphens.

Sphinx documentation on `Cross-referencing arbitrary locations `_ has more
details.

External links
^^^^^^^^^^^^^^

For external links you are likely to use only once, simply include the link
in the text.  This link to `google `_ was made like this::

    `google `_

For external links you will reference frequently, we have created a
``links_names.txt`` file.  These links can then be used throughout the
documentation.  Links in the ``links_names.txt`` file are created using the
`reST reference `_ syntax::

    .. _targetname: http://www.external_website.org

To refer to the reference in a separate reST file, include the
``links_names.txt`` file and refer to the link through its target name.  For
example, put this include at the bottom of your reST document::

    .. include:: ../links_names.txt

and refer to the hyperlink target::

    blah blah blah targetname_ more blah

Links to classes, modules and functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can also reference classes, modules, functions, etc. that are documented
using the sphinx `autodoc `_ facilities.  For example, see the module
:mod:`matplotlib.backend_bases` documentation, or the class
:class:`~matplotlib.backend_bases.LocationEvent`, or the method
:meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_connect`.

.. _ipython_highlighting:

ipython sessions
----------------

Michael Droettboom contributed a sphinx extension which does pygments syntax
highlighting on ipython sessions.

.. sourcecode:: ipython

    In [69]: lines = plot([1,2,3])

    In [70]: setp(lines)
      alpha: float
      animated: [True | False]
      antialiased or aa: [True | False]
      ...snip

This support is included in this template, but will also be included in a
future version of Pygments by default.

.. _formatting_text:

Formatting text
---------------

You use inline markup to make text *italics*, **bold**, or ``monotype``.

You can represent code blocks fairly easily::

    import numpy as np

    x = np.random.rand(12)

Or literally include code:

.. literalinclude:: elegant.py

.. _using_math:

Using math
----------

In sphinx you can include inline math :math:`x\leftarrow y\ x\forall y\ x-y`
or display math

.. math::

    W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} +
    \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2
    \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2
    U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]

This documentation framework includes a Sphinx extension,
:file:`sphinxext/mathmpl.py`, that uses matplotlib to render math equations
when generating HTML, and LaTeX itself when generating a PDF.  This can be
useful on systems that have matplotlib, but not LaTeX, installed.  To use it,
add ``mathpng`` to the list of extensions in :file:`conf.py`.

Current SVN versions of Sphinx now include built-in support for math.
There are two flavors:

- pngmath: uses dvipng to render the equation
- jsmath: renders the math in the browser using Javascript

To use these extensions instead, add ``sphinx.ext.pngmath`` or
``sphinx.ext.jsmath`` to the list of extensions in :file:`conf.py`.

All three of these options for math are designed to behave in the same way.

Inserting matplotlib plots
--------------------------

Inserting automatically-generated plots is easy.  Simply put the script to
generate the plot in any directory you want, and refer to it using the
``plot`` directive.  All paths are considered relative to the top-level of
the documentation tree.  To include the source code for the plot in the
document, pass the ``include-source`` parameter::

    .. plot:: devel/guidelines/elegant.py
       :include-source:

In the HTML version of the document, the plot includes links to the original
source code, a high-resolution PNG and a PDF.  In the PDF version of the
document, the plot is included as a scalable PDF.

.. plot:: devel/guidelines/elegant.py
   :include-source:

Emacs helpers
-------------

See :ref:`rst_emacs`.

Inheritance diagrams
--------------------

Inheritance diagrams can be inserted directly into the document by providing
a list of class or module names to the ``inheritance-diagram`` directive.
For example::

    .. inheritance-diagram:: codecs

produces:

.. inheritance-diagram:: codecs

.. _sphinx_literal:

This file
---------

.. literalinclude:: sphinx_helpers.rst
nipy-0.6.1/doc/devel/guidelines/testing.rst000066400000000000000000000140231470056100100206720ustar00rootroot00000000000000
.. _testing:

=======
Testing
=======

Nipy uses the Pytest_ framework.  If you plan to do development on nipy
please have a look at the `Pytest docs `_ and read through the `numpy testing
guidelines `_.

.. _automated-testing:

Automated testing
-----------------

We run the tests on every commit with travis-ci_ |--| see `nipy on travis`_.

We also have a farm of machines set up to run the tests on every commit to
the ``main`` branch at `nipy buildbot`_.

Writing tests
-------------

Test files
^^^^^^^^^^

We like test modules to import their testing functions and classes from the
module in which they are defined.  For example, we might want to use the
``assert_array_equal``, ``assert_almost_equal`` functions defined by
``numpy``, and the ``funcfile, anatfile`` variables from ``nipy``::

    from numpy.testing import assert_array_equal, assert_almost_equal

    from nipy.testing import funcfile, anatfile

Please name your test file with the ``test_`` prefix followed by the module
name it tests.  This makes it obvious for other developers which modules are
tested, where to add tests, etc...  An example test file and module
pairing::

    nipy/core/reference/coordinate_system.py
    nipy/core/reference/tests/test_coordinate_system.py

All tests go in a ``tests`` subdirectory for each package.

Temporary files
^^^^^^^^^^^^^^^

If you need to create a temporary file during your testing, you could use
one of these three methods, in order of convenience (a small example of the
first is sketched at the end of this section):

#. `StringIO `_

   StringIO creates an in-memory file-like object.  The memory buffer is
   freed when the file is closed.  This is the preferred method for
   temporary files in tests.

#. `in_tmp_path` Pytest fixture.

   This is a convenient way of putting you into a temporary directory so you
   can save anything you like into the current directory, and feel fine
   about it after.  Like this::

       def test_func(in_tmp_path):
           f = open('myfile', 'wt')
           f.write('Anything at all')
           f.close()

   One thing to be careful of is that you may need to delete objects holding
   onto the file before you exit the enclosing function, otherwise Windows
   may refuse to delete the file.

#. `tempfile.mkstemp `_

   This will create a temporary file which can be used during testing.
   There are parameters for specifying the filename *prefix* and *suffix*.

   .. Note::

       The tempfile module includes a convenience function
       *NamedTemporaryFile* which deletes the file automatically when it is
       closed.  However, whether the files can be opened a second time
       varies across platforms and there are problems using this function on
       *Windows*.

   Example::

       import os
       from tempfile import mkstemp

       try:
           fd, name = mkstemp(suffix='.nii.gz')
           tmpfile = open(name)
           save_image(fake_image, tmpfile.name)
           tmpfile.close()
       finally:
           os.unlink(name)  # This deletes the temp file

Please don't just create a file in the test directory and then remove it
with a call to ``os.remove``.  For various reasons, sometimes ``os.remove``
doesn't get called and temp files get left around.
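As promised, here is a minimal sketch of the StringIO approach; the test
body is invented for illustration::

    from io import StringIO

    def test_write_report():
        fp = StringIO()                  # in-memory file-like object
        fp.write('subject,score\n')
        fp.write('s01,3.2\n')
        # The "file" contents can be checked without touching the disk.
        assert fp.getvalue().startswith('subject,score')
        fp.close()                       # frees the memory buffer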
Many tests in one test function
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To keep tests organized, it's best to have one test function correspond to
one class method or module-level function.  Often though, you need many
individual tests to thoroughly cover the method/function.  For convenience,
we often write many tests in a single test function.  This has the
disadvantage that if one test fails, the testing framework will not run any
of the subsequent tests in the same function.  This isn't a big problem in
practice, because we run the tests so often (:ref:`automated-testing`) that
we can quickly pick up and fix the failures.

For example, this test function executes four tests::

    def test_index():
        cs = CoordinateSystem('ijk')
        assert_equal(cs.index('i'), 0)
        assert_equal(cs.index('j'), 1)
        assert_equal(cs.index('k'), 2)
        assert_raises(ValueError, cs.index, 'x')

Suppress *warnings* on test output
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In order to reduce noise when running the tests, consider suppressing
*warnings* in your test modules.  See the `pytest documentation `_ for
various ways to do that, or search our code for `pytest.mark` for examples.

Running tests
-------------

Running the full test suite
^^^^^^^^^^^^^^^^^^^^^^^^^^^

To run nipy's tests, you will need to have pytest_ installed.  Then::

    pytest nipy

You can run the full tests, including doctests, with::

    pip install pytest-doctestplus
    pytest --doctest-plus nipy

Install optional data packages for testing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For our tests, we have collected a set of fmri imaging data which are
required for the tests to run.  To do this, download the latest example data
and template package files from `NIPY data packages`_.  See
:ref:`data-files`.

Running individual tests
^^^^^^^^^^^^^^^^^^^^^^^^

You can also run the tests from the command line with a variety of options.

To test an individual module::

    pytest nipy/core/image/tests/test_image.py

To test an individual function::

    pytest nipy/core/image/tests/test_image.py::test_maxmin_values

To test a class::

    pytest nipy/algorithms/clustering/tests/test_clustering.py::TestClustering

To test a class method::

    pytest nipy/algorithms/clustering/tests/test_clustering.py::TestClustering.testkmeans1

Verbose mode (*-v* option) will print out the function names as they are
executed.  Standard output is normally suppressed by Pytest; to see any
print statements you must include the *-s* option.
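For instance, in this made-up test module, the ``print`` output only appears
when Pytest runs with capturing disabled via ``-s``::

    # test_example_output.py -- hypothetical test, for illustration only
    import numpy as np

    def test_sum():
        data = np.arange(4)
        # Diagnostic print; hidden unless pytest is invoked with -s.
        print('data was', data)
        assert data.sum() == 6

Running ``pytest -s test_example_output.py`` shows the printed line as the
test executes.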
In order to get a "full verbose" output, call Pytest like this::

    pytest -sv nipy

To include doctests in the tests::

    pytest -sv --doctest-plus nipy

.. _coverage:

.. include:: ./coverage_testing.rst

.. include:: ../../links_names.txt
nipy-0.6.1/doc/devel/images.rst000066400000000000000000000026131470056100100163340ustar00rootroot00000000000000
===================
Describing images
===================

Here we set out what we think an image is and how it should work in our
code.  We are largely following the nifti_ standard.

What is an image?
=================

An image is the association of a block (array) of spatial data with the
relationship of the position of that data to some continuous space.

Therefore an image contains:

* an array
* a spatial transformation describing the position of the data in the array
  relative to some space.

An image always has 3 spatial dimensions.  It can have other dimensions,
such as time.

A slice from a 3D image is also a 3D image, but with one dimension of the
image having length 1.

The transformation is spatial and refers to exactly three dimensions.

::

    import numpy as np
    import neuroimaging as ni

    img = ni.load_image('example3d.img')
    arr = img.get_data()
    assert isinstance(arr, np.ndarray)
    xform = img.get_transform()
    voxel_position = [0, 0, 0]
    world_position = xform.apply(voxel_position)
    assert world_position.shape == (3,)

An image has an array.  The first 3 axes (dimensions) of that array are
spatial.  Further dimensions can have various meanings.  The most common
meaning of the 4th axis is time.

The relationship of the first three dimensions to any particular orientation
in space is only known from the image transform.

.. include:: ../links_names.txt
nipy-0.6.1/doc/devel/index.rst000066400000000000000000000004451470056100100161770ustar00rootroot00000000000000
.. _developers-guide-index:

=================
Developer Guide
=================

.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   development_quickstart
   install/index
   guidelines/index
   planning/index
   code_discussions/index
   tools/index
nipy-0.6.1/doc/devel/install/000077500000000000000000000000001470056100100160015ustar00rootroot00000000000000
nipy-0.6.1/doc/devel/install/debian.rst000066400000000000000000000024241470056100100177570ustar00rootroot00000000000000
===================================
Debian / Ubuntu developer install
===================================

Dependencies
------------

See :ref:`installation` for the installation instructions.  Since NiPy is
provided within the stock distribution (``main`` component of Debian, and
``universe`` of Ubuntu), to install all necessary requirements it is enough
to::

    sudo apt-get build-dep python-nipy

.. note::

    Above invocation assumes that you have references to ``Source``
    repository listed with ``deb-src`` prefixes in your apt .list files.

Otherwise, you can revert to manual installation with::

    sudo apt-get install build-essential
    sudo apt-get install python-dev
    sudo apt-get install python-numpy python-numpy-dev python-scipy
    sudo apt-get install liblapack-dev
    sudo apt-get install python-sympy

Useful additions
----------------

Some functionality in NiPy requires additional modules::

    sudo apt-get install ipython
    sudo apt-get install python-matplotlib
    sudo apt-get install mayavi2

For getting the code via version control::

    sudo apt-get install git-core

Then follow the instructions at :ref:`trunk_download`.

And for easier control of multiple Python module installations (e.g.
different versions of IPython):: sudo apt-get install virtualenvwrapper nipy-0.6.1/doc/devel/install/fedora.rst000066400000000000000000000012701470056100100177730ustar00rootroot00000000000000========================== Fedora developer install ========================== See :ref:`installation` This assumes a recent Fedora (>=10) version. It may work for earlier versions - see :ref:`installation` for requirements. This page may also hold for Fedora-based distributions such as Mandriva and Centos. Run all the ``yum install`` commands as root. Requirements:: yum install gcc-c++ yum install python-devel yum install numpy scipy yum install sympy yum install atlas-devel Options:: yum install ipython yum install python-matplotlib For getting the code via version control:: yum install git-core Then follow the instructions at :ref:`trunk_download` nipy-0.6.1/doc/devel/install/index.rst000066400000000000000000000004461470056100100176460ustar00rootroot00000000000000.. _distribution-installs: ================================================ Developer installs for different distributions ================================================ .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 debian fedora windows nipy-0.6.1/doc/devel/install/windows.rst000066400000000000000000000052761470056100100202370ustar00rootroot00000000000000================================ Development install on windows ================================ The easy way - a super-package ------------------------------ The easiest way to get the dependencies is to install PythonXY_ or the `Enthought Tool Suite`_ . This gives you MinGW_, Python_, Numpy_, Scipy_, ipython_ and matplotlib_ (and much more). The hard way - by components ---------------------------- If instead you want to do it by component, try the instructions below. Requirements: * Download and install MinGW_ * Download and install the windows binary for Python_ * Download and install the Numpy_ and Scipy_ binaries * Download and install Sympy_ Options: * Download and install ipython_, being careful to follow the windows installation instructions * Download and install matplotlib_ Alternatively, if you are very brave, you may want to install numpy / scipy from source - see our maybe out of date :ref:`windows_scipy_build` for details. Getting and installing NIPY --------------------------- You will next need to get the NIPY code via version control: * Download and install the windows binary for git_ * Go to the windows menu, find the ``git`` menu, and run ``git`` in a windows terminal. You should now be able to follow the instructions in :ref:`trunk_download`, but with the following modifications: Running the build / install --------------------------- Here we assume that you do *not* have the Microsoft visual C tools, you did not use the ETS_ package (which sets the compiler for you) and *are* using a version of MinGW_ to compile NIPY. First, for the ``python setup.py`` steps, you will need to add the ``--compiler=mingw32`` flag, like this:: python setup.py build --compiler=mingw32 install Note that, with this setup you cannot do inplace (developer) installs (like ``python setup.py build_ext --inplace``) because of a six-legged python packaging feature that does not allow the compiler options (here ``--compiler=mingw32``) to be passed from the ``build_ext`` command. 
If you want to be able to do that, add these lines to your ``distutils.cfg`` file :: [build] compiler=mingw32 [config] compiler = mingw32 See http://docs.python.org/install/#inst-config-files for details on this file. After you've done this, you can run the standard ``python setup.py build_ext --inplace`` command. The command line from Windows ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default windows XP command line ``cmd`` is very basic. You might consider using the Cygwin_ bash shell, or you may want to use the ipython_ shell to work in. For system commands use the ``!`` escape, like this, from the ipython prompt:: !python setup.py build --compiler=mingw32 .. include:: ../../links_names.txt nipy-0.6.1/doc/devel/install/windows_scipy_build.rst000066400000000000000000000173361470056100100226250ustar00rootroot00000000000000.. _windows_scipy_build: Building Scipy/Numpy on Windows with Optimized Numerical Libraries ================================================================== This involves compiling several libraries (ATLAS, LAPACK, FFTW and UMFPACK) and then building `numpy `_ and `scipy `_ from SVN source. But as with most things Windows, this turns out to be a slightly tricky affair. The following has been tested on Windows Vista Enterprise 32bit only, but should theoretically work on other Windows platforms. It also used Python 2.5. Ideally, a big chunk of this page should move to the scipy/numpy site. And also ideally should become a single script. But it's also good to know exactly how you got there. Prerequisites ~~~~~~~~~~~~~ * You need Windows Vista enterprise/ultimate with `SUA `_ enabled and installed or Windows (others, including other Vista variants) with `Cygwin `_ installed. You cannot install the SUA package on a non enterprise or ultimate Vista edition. * MinGW (`installer `_) with gcc 3.4.5 (choose the candidate option when installing) and the `msys `_ environment installed. You will need to download the following packages for msys: * bzip2-1.0.3-MSYS-1.0.11-snapshot.tar.bz2 * coreutils-5.97-MSYS-1.0.11-snapshot.tar.bz2 * diffutils-2.8.7-MSYS-1.0.11-snapshot.tar.bz2 * gawk-3.1.5-MSYS-1.0.11-snapshot.tar.bz2 * make-3.81-MSYS-1.0.11-snapshot.tar.bz2 * msysCORE-1.0.11-2007.01.19-1.tar.bz2 * binutils-2.17.50-20070129-1.tar.gz Just unpack all the package contents in a single directory and copy them over to the MinGW installation directory. You may want to add the following to the system path: :: set PATH=[PATH TO]\MinGW;[PATH TO]\MinGW\libexec\gcc\mingw32\3.4.5;%PATH% * Numerical Libraries * `ATLAS latest developer version `_ * LAPACK `lapack 3.1 scroll down to Available software `_ * FFTW `fftw-3.1.2 `_ * UMFPACK `download UMFPACK, UFConfig, AMD `_ Installation ~~~~~~~~~~~~ * Create a directory called BUILDS, BUILDS/lib, BUILDS/include * Unpack all the numerical library files in BUILDS * Create subversion check out directories for scipy and numpy in BUILDS * Start SUA c-shell or cygwin shell * Start msys.bat:: PATH=/mingw/libexec/gcc/mingw32/3.4.5:$PATH; export PATH * Change directory to location of BUILDS. (/dev/fs/driveletter/... in SUA, /cygdrive/driveletter/... in cygwin, /driveletter/... in msys) Compiling ATLAS ^^^^^^^^^^^^^^^ * This is done in the SUA/Cygwin shell. 
In Cygwin you probably want to follow the instructions at `Installing Scipy
on Windows `_.

* ``cd ATLAS; mkdir build; cd build``
* Run `../configure` (This will probably fail but will leave you with
  xconfig)
* Run `./xconfig --help` (to see all options)
* Run `../configure -O 8 -A 16 -m 3189 -b 32` (replacing the values with
  your machine configuration)
* Edit Make.inc to provide correct L2SIZE
* Run `make` (leave your computer and go do something else for about an
  hour)

Compiling LAPACK
^^^^^^^^^^^^^^^^

* This is done in the msys shell
* `cd lapack_XX`
* Copy make.inc.example to make.inc
* Edit the following lines in make.inc::

    PLAT = _NT
    OPTS = -funroll-all-loops -O3 -malign-double -msse2
    BLASLIB = -L/driveletter/[PATH TO]/BUILDS/ATLAS/build/lib -lf77blas -latlas

* Run `make lib`

Combining LAPACK and ATLAS
^^^^^^^^^^^^^^^^^^^^^^^^^^

* Stay in the msys shell after compiling LAPACK
* Go to the ATLAS/build/lib directory
* Execute the following commands::

    mkdir tmp; cd tmp
    cp ../liblapack.a ../liblapack_ATLAS.a
    ar -x ../liblapack.a
    cp [PATH TO]/lapack_NT.a ../liblapack.a
    ar -r ../liblapack.a *.o
    rm *.o
    ar -x ../liblapack.a xerbla.o
    ar -r ../libf77blas.a xerbla.o

* Copy liblapack.a, libf77blas.a, libcblas.a, libatlas.a to BUILDS/lib
* Copy the ATLAS/include directory to BUILDS/include/ATLAS

Compiling UMFPACK
^^^^^^^^^^^^^^^^^

* Stay in msys shell
* Go to UFconfig
* Edit UFConfig/UFconfig.mk::

    BLAS = -L/driveletter/[PATH TO]/BUILDS/lib -llapack -lf77blas -lcblas -latlas -lg2c
    LAPACK = -L/driveletter/[PATH TO]/BUILDS/lib -llapack -lf77blas -lcblas -latlas -lg2c
    XERBLA =

* Run the following commands::

    cd ../AMD
    make
    cd ../UMFPACK
    make

* Copy libamd.a (from AMD), libumfpack.a (from UMFPACK) to BUILDS/lib
* Copy UMFPACK/include to BUILDS/include/UMFPACK
* Copy UFconfig/ufconfig.h to BUILDS/include
* Copy AMD/include/amd.h to BUILDS/include

Compiling fftw
^^^^^^^^^^^^^^

.. note::

    The latest versions of scipy do not link to FFTW, so this step is no
    longer useful for scipy

* Stay in msys shell
* Go to fftw_XX
* `mkdir build; cd build`
* Run the following command::

    ../configure --prefix=/c/DOWNLOADS/BUILDS/ --enable-sse2 --disable-dependency-tracking --enable-threads --with-our-malloc16 --with-windows-f77-mangling --with-combined-threads

* Run `make` OR `make -j 4` if you have multiple processors (it'll make
  things go faster.  This build on msys in vista takes a while)
* Copy `.libs/libfftw3.a` to BUILDS/lib
* Copy fftw_XX/api/fftw3.h to BUILDS/include

Compiling numpy/scipy
^^^^^^^^^^^^^^^^^^^^^

.. note::

    As above, note that the FFTW linking here is no longer useful for the
    scipy install

* Open a Windows cmd window and make sure you can execute python.
* Make a copy of each of the libs in BUILDS/lib and rename them from
  libname.a to name.lib
* Rename lapack.lib to flapack.lib
* rename site.cfg.example to site.cfg
* Edit site.cfg in the numpy directory.
Replace the blas_opt and lapack_opt section with:: [atlas] libraries = f77blas, cblas, atlas, g2c library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include\ATLAS [lapack] libraries = flapack, f77blas, cblas, atlas library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib [amd] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include libraries = amd [umfpack] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include\UMFPACK libraries = umfpack [fftw3] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include libraries = fftw3 * Edit numpy/distutils/fcompiler/gnu.py. Find the line that says `opt.append('gcc')` and comment it `# opt.append('gcc')`. This is probably a Vista SUA thing and perhaps won't be required when using Cygwin to compile ATLAS. * Copy site.cfg to ../scipy/site.cfg * Compile numpy:: cd numpy python setup.py config --compiler=mingw32 build --compiler=mingw32 bdist_wininst * Install numpy from the numpy/dist folder * Compile scipy:: cd scipy python setup.py config --compiler=mingw32 build --compiler=mingw32 bdist_wininst * Install scipy from the scipy/dist folder * Test installations. In python run:: import numpy import scipy numpy.test() scipy.test() numpy.show_config() scipy.show_config() nipy-0.6.1/doc/devel/planning/000077500000000000000000000000001470056100100161415ustar00rootroot00000000000000nipy-0.6.1/doc/devel/planning/TODO.rst000066400000000000000000000076031470056100100174460ustar00rootroot00000000000000.. _todo: =========================== TODO for nipy development =========================== This document will serve to organize current development work on nipy. It will include current sprint items, future feature ideas, and design discussions, etc... Documentation ============= * Create NIPY sidebar with links to all project related websites. * Create a Best Practices document. * Create a rst doc for *Request a review* process. Tutorials --------- Tutorials are an excellent way to document and test the software. Some ideas for tutorials to write in our Sphinx documentation (in no specific order): * Slice timing * Image resampling * Image IO * Registration using SPM/FSL * FMRI analysis * Making one 4D image from many 3D images, and vice versa. Document ImageList and FmriImageList. * Apply SPM registration .mat to a NIPY image. * Create working example out of this TRAC `pca `_ page. Should also be a rest document. * Add analysis pipeline(s) blueprint. Bugs ==== These should be moved to the nipy_ bug section on github. Placed here until they can be input. * Fix possible precision error in fixes.scipy.ndimage.test_registration function test_autoalign_nmi_value_2. See FIXME. * Fix error in test_segment test_texture2 functions (fixes.scipy.ndimage). See FIXME. * import nipy.algorithms is very slow! Find and fix. The shared library is slow. * base class for all new-style classes should be *object*; preliminary search with ``grin "class +[a-zA-Z0-9]+ *:"`` Refactorings ============ * image.save function should accept filename or file-like object. If I have an open file I would like to be able to pass that in also, instead of fp.name. Happens in test code a lot. 
* The image._open function should accept Image objects in addition to
  ndarrays and filenames.  Currently the save function has to call
  np.asarray(img) to get the data array out of the image and pass it
  to _open in order to create the output image.
* Add dtype options when saving.  When saving images it uses the
  native dtype for the system.  We should be able to specify this.  In
  test_file_roundtrip, self.img is a uint8, but is saved to tmpfile as
  float64.  Adding this would allow us to save images without the
  scaling being applied.
* In image._open(url, ...), should we test if the "url" is a PyNiftiIO
  object already?  This was in the tests from 'old code' and passed::

      new = Image(self.img._data, self.img.grid)

  img._data is a PyNiftiIO object.  It works, but we should verify
  it's harmless, or otherwise prevent it from happening.
* Look at the image.merge_image function.  Is it still needed?  Does
  it fit into the current api?
* FmriImageList.emptycopy() - Is there a better way to do this?
  Matthew proposed possibly implementing Gael's dress/undress metadata
  example.
* Verify documentation of the image generators.  Create a simple
  example using them.
* Use the python 2.5 feature of being able to reset the generator?
* Add test data where volumes contain intensity ramps.  Slice with a
  generator and test ramp values.
* Implement the `fmriimagelist blueprint `_.

Code Design Thoughts
====================

A central location to dump thoughts that could be shared by the
developers and tracked easily.

Future Features
===============

Put ideas here for features nipy should have but that are not part of
our current development.  These features will eventually be added to a
weekly sprint log.

* Auto backup script for nipy repos to run as a weekly cron job.  We
  should set up a machine to perform regular branch builds and tests.
  This would also provide an on-site backup.
* See if we can add bz2 support to nifticlib.
* Should image.load have an optional squeeze keyword to squeeze a 4D
  image with one frame into a 3D image?

.. include:: ../../links_names.txt

nipy-0.6.1/doc/devel/planning/index.rst000066400000000000000000000003231470056100100200010ustar00rootroot00000000000000.. _development_planning:

======================
 Development Planning
======================

.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   roadmap.rst
   TODO.rst

nipy-0.6.1/doc/devel/planning/roadmap.rst000066400000000000000000000013651470056100100203230ustar00rootroot00000000000000.. _roadmap:

==============
 Nipy roadmap
==============

We plan to release a prototype of NIPY_ by the Summer of 2009.  This
will include a full FMRI analysis, 2D visualization, and integration
with other packages for spatial processing (SPM_ and FSL_).  We will
continue to improve our documentation and tutorials with the aim of
providing a full introduction to neuroimaging analysis.

We will also extend our collaborations with other neuroimaging groups,
integrating more functionality into NIPY and providing better
interoperability with other packages.  This will include the design
and implementation of a pipeline/batching system, integration of
registration algorithms, and improved 2D and 3D visualization.

.. include:: ../../links_names.txt

nipy-0.6.1/doc/devel/tools/000077500000000000000000000000001470056100100154735ustar00rootroot00000000000000nipy-0.6.1/doc/devel/tools/index.rst000066400000000000000000000003151470056100100173330ustar00rootroot00000000000000.. _developer_tools:

=================
 Developer Tools
=================
.. only:: html

   :Release: |version|
   :Date: |today|

.. toctree::
   :maxdepth: 2

   tricked_out_emacs
   virtualenv-tutor

nipy-0.6.1/doc/devel/tools/tricked_out_emacs.rst000066400000000000000000000134741470056100100217200ustar00rootroot00000000000000.. _tricked_out_emacs:

===================================
Tricked out emacs for python coding
===================================

Various ways to configure your emacs that you might find useful.  See
emacs_python_mode_ for a good summary.

.. _rst_emacs:

ReST mode
---------

For editing ReST documents like this one.  You may need a recent
version of the rst.el_ file from the docutils_ site.

.. _rst.el: http://docutils.sourceforge.net/tools/editors/emacs/rst.el

``rst`` mode automates many important ReST tasks like building and
updating table-of-contents, and promoting or demoting section headings.

Here is the basic ``.emacs`` configuration::

    (require 'rst)
    (setq auto-mode-alist
          (append '(("\\.txt$" . rst-mode)
                    ("\\.rst$" . rst-mode)
                    ("\\.rest$" . rst-mode)) auto-mode-alist))

Some helpful functions::

    C-c TAB - rst-toc-insert
        Insert table of contents at point
    C-c C-u - rst-toc-update
        Update the table of contents at point
    C-c C-l - rst-shift-region-left
        Shift region to the left
    C-c C-r - rst-shift-region-right
        Shift region to the right

.. note::

   On older Debian-based releases, the default ``M-x rst-compile``
   command uses ``rst2html.py``, whereas the command installed is
   ``rst2html``.  A symlink was required as a quick fix.

doctest mode
------------

This useful mode for writing doctests (``doctest-mode.el``) comes with
the ``python-mode`` package on Debian-based systems.  Otherwise, see
the doctest-mode_ project page.

code checkers
-------------

Code checkers within emacs can be useful to check code for errors,
unused variables, imports and so on.  Alternatives are pychecker_,
pylint_ and pyflakes_.  Note that rope_ (below) also does some code
checking.  pylint_ and pyflakes_ work best with emacs flymake_, which
usually comes with emacs.

pychecker_
``````````

This appears to be plumbed in with ``python-mode``; just do ``M-x
py-pychecker-run``.  If you try this, and pychecker_ is not installed,
you will get an error.  You can install it using your package manager
(``pychecker`` on Debian-based systems) or from the pychecker_ webpage.

pylint_
```````

Install pylint_.  Debian packages pylint_ as ``pylint``.  Put the
`flymake .emacs snippet`_ in your ``.emacs`` file.  You will see, on
the emacs_python_mode_ page, that you will need to save this::

    #!/usr/bin/env python3

    import re
    import sys
    from subprocess import PIPE, Popen

    # Run pylint on the file given as first argument, with parseable
    # output, and read its report line by line.
    p = Popen("pylint -f parseable -r n --disable-msg-cat=C,R %s" %
              sys.argv[1], shell=True, stdout=PIPE,
              universal_newlines=True).stdout

    for line in p.readlines():
        match = re.search("\\[([WE])(, (.+?))?\\]", line)
        if match:
            kind = match.group(1)
            func = match.group(3)
            if kind == "W":
                msg = "Warning"
            else:
                msg = "Error"
            if func:
                line = re.sub("\\[([WE])(, (.+?))?\\]",
                              "%s (%s):" % (msg, func), line)
            else:
                line = re.sub("\\[([WE])?\\]", "%s:" % msg, line)
        print(line, end='')
    p.close()

as ``epylint`` somewhere on your system path, and test that ``epylint
somepyfile.py`` works.

pyflakes
````````

Install pyflakes_.  Maybe your package manager again?  (``apt-get
install pyflakes``).  Install the `flymake .emacs snippet`_ in your
``.emacs`` file.
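To check that pyflakes works before wiring it into flymake, run it by
hand on a file with a known problem.  For example, with this small
(hypothetical) module saved as ``demo_unused.py``, running ``pyflakes
demo_unused.py`` should report that ``os`` is imported but unused::

    # demo_unused.py - pyflakes should flag the unused import below
    import os

    def greet(name):
        return "hello %s" % name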
flymake .emacs snippet
``````````````````````

Add this to your .emacs file::

    ;; code checking via flymake
    ;; set code checker here from "epylint", "pyflakes"
    (setq pycodechecker "pyflakes")
    (when (load "flymake" t)
      (defun flymake-pycodecheck-init ()
        (let* ((temp-file (flymake-init-create-temp-buffer-copy
                           'flymake-create-temp-inplace))
               (local-file (file-relative-name
                            temp-file
                            (file-name-directory buffer-file-name))))
          (list pycodechecker (list local-file))))
      (add-to-list 'flymake-allowed-file-name-masks
                   '("\\.py\\'" flymake-pycodecheck-init)))

and set which of pylint_ ("epylint") or pyflakes_ ("pyflakes") you want
to use.

You may also consider using the ``flymake-cursor`` functions; see the
``pyflakes`` section of the emacs_python_mode_ page for details.

ropemacs_
---------

rope_ is a python refactoring library, and ropemacs_ is an emacs
interface to it that uses pymacs_.  pymacs_ is an interface between
emacs lisp and python that allows emacs to call into python and python
to call back into emacs.

Install
```````

- rope_ - by downloading from the link, and running ``python setup.py
  install`` in the usual way.
- pymacs_ - probably via your package manager - for example ``apt-get
  install pymacs``
- ropemacs_ - download from the link, then ``python setup.py install``

You may need to make sure your gnome etc sessions have the correct
python path settings - for example, settings in ``.gnomerc`` as well
as the usual ``.bashrc``.

Make sure you can `import ropemacs` from python (which should drop you
into something lispey).  Add these lines somewhere in your `.emacs`
file::

    (require 'pymacs)
    (pymacs-load "ropemacs" "rope-")

and restart emacs.  When you open a python file, you should have a
``rope`` menu.  Note `C-c g` - the excellent `goto-definition` command.

Switching between modes
-----------------------

You may well find it useful to be able to switch fluidly between python
mode, doctest mode, ReST mode and flymake mode (pylint_).  You can
attach these modes to function keys in your ``.emacs`` file with
something like::

    (global-set-key [f8] 'flymake-mode)
    (global-set-key [f9] 'python-mode)
    (global-set-key [f10] 'doctest-mode)
    (global-set-key [f11] 'rst-mode)

emacs code browser
------------------

Not really python specific, but a rather nice set of windows for
browsing code directories, and code - see the ECB_ page.  Again, your
package manager may help you (``apt-get install ecb``).

.. include:: ../../links_names.txt

nipy-0.6.1/doc/devel/tools/virtualenv-tutor.rst000066400000000000000000000142661470056100100216060ustar00rootroot00000000000000Setting up virtualenv
=====================

.. Contents::

Overview
--------

virtualenv_ is a tool that allows you to install python packages in
isolated environments.  In this way you can have multiple versions of
the same package without interference.  I started using this to easily
switch between multiple versions of numpy without having to constantly
reinstall and update my symlinks.  I also did this as a way to install
software for Scipy2008_, like the Enthought Tool Suite (ETS_), in a way
that would not affect my current development environment.

This tutorial is based heavily on a blog entry from Prabhu_.  I've
extended his shell script to make switching between virtual
environments a one-command operation.
(A few others who should be credited for encouraging me to use
virtualenv_: Gael_, Jarrod_, Fernando_.)

Installing
----------

Download and install the tarball for virtualenv_::

    tar xzf virtualenv-1.1.tar.gz
    cd virtualenv-1.1
    python setup.py install --prefix=$HOME/local

Note: I install in a local directory; your install location may differ.

Setup virtualenv
----------------

Set up a base virtualenv directory.  I create this in a local
directory; you can do this in a place of your choosing.  All virtual
environments will be installed as subdirectories in here::

    cd ~/local
    mkdir -p virtualenv

Create a virtualenv
-------------------

Create a virtual environment.  Here I change into my virtualenv
directory and create a virtual environment for my numpy-1.1.1
install::

    cd virtualenv/
    virtualenv numpy-1.1.1

Activate a virtualenv
---------------------

Set numpy-1.1.1 as the *active* virtual environment::

    ln -s numpy-1.1.1/bin/activate .

We *enable* the numpy-1.1.1 virtual environment by sourcing its
activate script.  This will prepend the active virtual environment to
our `PATH`::

    # note: still in the ~/local/virtualenv directory
    source activate

We can see our `PATH` with the numpy-1.1.1 virtual environment at the
beginning.  Also note that the label of the virtual environment is
prepended to our prompt::

    (numpy-1.1.1)cburns@~ 20:23:54 $ echo $PATH
    /Users/cburns/local/virtualenv/numpy-1.1.1/bin:
    /Library/Frameworks/Python.framework/Versions/Current/bin:
    /Users/cburns/local/bin:
    /usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin:/usr/local/git/bin

Install packages into a virtualenv
----------------------------------

Then we install numpy-1.1.1 into the virtual environment.  In order to
install packages in the virtual environment, you need to use the
*python* or *easy_install* from that virtualenv::

    ~/local/virtualenv/numpy-1.1.1/bin/python setup.py install

At this point any package I install in this virtual environment will
only be used when the environment is active.

Pragmatic virtualenv
--------------------

There are a few more manual steps in the above process than I wanted,
so I extended the shell script that Prabhu_ wrote to make this a
simple one-command operation.  One still needs to manually create each
virtual environment, and install packages, but this script simplifies
activating and deactivating them.

The `venv_switch.sh` script will:

* Activate the selected virtual environment.  (Or issue an error if it
  doesn't exist.)
* Launch a new bash shell using the ~/.virtualenvrc file, which sources
  the virtualenv/activate script.  The activate script modifies the
  PATH and prepends the bash prompt with the virtualenv label.

`venv_switch.sh`::

    #!/bin/sh
    # venv_switch.sh
    # switch between different virtual environments

    # verify a virtualenv is passed in
    if [ $# -ne 1 ]
    then
        echo 'Usage: venv_switch venv-label'
        exit -1
    fi

    # verify the virtualenv exists
    VENV_PATH=~/local/virtualenv/$1

    # activate env script
    ACTIVATE_ENV=~/local/virtualenv/activate

    echo $VENV_PATH

    if [ -e $VENV_PATH ]
    then
        echo 'Switching to virtualenv' $VENV_PATH
        echo "Starting new bash shell. Simply 'exit' to return to previous shell"
    else
        echo 'Error: virtualenv' $VENV_PATH 'does not exist!'
exit -1 fi rm $ACTIVATE_ENV ln -s ~/local/virtualenv/$1/bin/activate $ACTIVATE_ENV # Launch new terminal bash --rcfile ~/.virtualenvrc Now to activate our numpy-1.1.1 virtual environment, we simply do:: venv_switch.sh numpy-1.1.1 To deactivate the virtual environment and go back to your original environment, just exit the bash shell:: exit The rcfile used to source the activate script. I first source my .profile to setup my environment and custom prompt, then source the virtual environment. `.virtualenvrc`:: # rc file to initialize bash environment for virtualenv sessions # first source the bash_profile source ~/.bash_profile # source the virtualenv source ~/local/virtualenv/activate Installing ETS 3.0.0 -------------------- As another example, I installed ETS_ 3.0.0 for the Tutorial sessions at Scipy2008_. (Note the prerequisites_.) Set up an ets-3.0.0 virtualenv:: cburns@virtualenv 15:23:50 $ pwd /Users/cburns/local/virtualenv cburns@virtualenv 15:23:50 $ virtualenv ets-3.0.0 New python executable in ets-3.0.0/bin/python Installing setuptools............done. cburns@virtualenv 15:24:29 $ ls activate ets-3.0.0 numpy-1.1.1 numpy-1.2.0b2 Switch into my ets-3.0.0 virtualenv using the `venv_switch.sh` script:: cburns@~ 15:29:12 $ venv_switch.sh ets-3.0.0 /Users/cburns/local/virtualenv/ets-3.0.0 Switching to virtualenv /Users/cburns/local/virtualenv/ets-3.0.0 Starting new bash shell. Simply 'exit' to return to previous shell Install ETS_ using easy_install. Note we need to use the easy_install from our ets-3.0.0 virtual environment:: (ets-3.0.0)cburns@~ 15:31:41 $ which easy_install /Users/cburns/local/virtualenv/ets-3.0.0/bin/easy_install (ets-3.0.0)cburns@~ 15:31:48 $ easy_install ETS .. include:: ../../links_names.txt .. _Prabhu: http://prabhuramachandran.blogspot.com/2008/03/using-virtualenv-under-linux.html .. _Gael: http://gael-varoquaux.info/blog/ .. _Jarrod: http://jarrodmillman.blogspot.com/ .. _Fernando: http://fdoperez.blogspot.com/search/label/scipy .. _Scipy2008: http://conference.scipy.org/ .. _prerequisites: https://svn.enthought.com/enthought/wiki/Install nipy-0.6.1/doc/documentation.rst000066400000000000000000000005761470056100100166470ustar00rootroot00000000000000.. _documentation-main: ==================== NIPY documentation ==================== .. only:: html :Release: |version| :Date: |today| Contents: .. toctree:: :maxdepth: 2 users/index.rst labs/index.rst devel/index.rst faq/index.rst api/index.rst publications license .. only:: html * :ref:`genindex` * :ref:`modindex` * :ref:`search` nipy-0.6.1/doc/faq/000077500000000000000000000000001470056100100140035ustar00rootroot00000000000000nipy-0.6.1/doc/faq/documentation_faq.rst000066400000000000000000000042141470056100100202360ustar00rootroot00000000000000.. _documentation_faq: =================== Documentation FAQ =================== .. _installing_graphviz_on_OSX: Installing graphviz on OSX -------------------------- The easiest way I found to do this was using MacPorts_, all other methods caused python exceptions when attempting to write out the pngs in the inheritance_diagram.py functions. Just do:: sudo port install graphviz And make sure your macports directory (``/opt/local/bin``) is in your PATH. Error writing output on OSX --------------------------- If you are getting an error during the **writing output...** phase of the documentation build you may have a problem with your graphviz_ install. 
The error may look something like:: **writing output...** about api/generated/gen api/generated/nipy api/generated/nipy.algorithms.fwhm Format: "png" not recognized. Use one of: canon cmap cmapx cmapx_np dia dot eps fig hpgl imap imap_np ismap mif mp pcl pic plain plain-ext ps ps2 svg svgz tk vml vmlz vtx xdot ... Exception occurred: File "/Users/cburns/src/nipy-repo/trunk-dev/doc/sphinxext/ inheritance_diagram.py", line 238, in generate_dot (name, self._format_node_options(this_node_options))) IOError: [Errno 32] Broken pipe Try installing graphviz using MacPorts_. See the :ref:`installing_graphviz_on_OSX` for instructions. Sphinx and reST gotchas ----------------------- Docstrings ^^^^^^^^^^ Sphinx_ and reST_ can be very picky about whitespace. For example, in the docstring below the *Parameters* section will render correctly, where the *Returns* section will not. By correctly I mean Sphinx will insert a link to the CoordinateSystem class in place of the cross-reference *:class:`CoordinateSystem`*. The *Returns* section will be rendered exactly as shown below with the *:class:* identifier and the backticks around CoordinateSystem. This section fails because of the missing whitespace between ``product_coord_system`` and the colon ``:``. :: Parameters ---------- coord_systems : sequence of :class:`CoordinateSystem` Returns ------- product_coord_system: :class:`CoordinateSystem` .. include:: ../links_names.txt nipy-0.6.1/doc/faq/index.rst000066400000000000000000000003151470056100100156430ustar00rootroot00000000000000.. _faq-index: ===== FAQ ===== .. only:: html :Release: |version| :Date: |today| Frequently asked questions about nipy .. toctree:: :maxdepth: 2 why licensing documentation_faq nipy-0.6.1/doc/faq/johns_bsd_pitch.rst000066400000000000000000000147511470056100100177050ustar00rootroot00000000000000.. _johns-bsd-pitch: Why we should be using BSD ========================== John Hunter - 16 Dec 2004 I'll start by summarizing what many of you already know about open source licenses. I believe this discussion is broadly correct, though it is not a legal document and if you want legally precise statements you should reference the original licenses cited here. The `Open-Source-Initiative `_ is a clearing house for OS licenses, so you can read more there. The two dominant license variants in the wild are GPL-style and BSD-style. There are countless other licenses that place specific restrictions on code reuse, but the purpose of this document is to discuss the differences between the GPL and BSD variants, specifically in regards to my experience developing matplotlib_ and in my discussions with other developers about licensing issues. The best known and perhaps most widely used license is the :term:`GPL`, which in addition to granting you full rights to the source code including redistribution, carries with it an extra obligation. If you use GPL code in your own code, or link with it, your product must be released under a GPL compatible license. I.e., you are required to give the source code to other people and give them the right to redistribute it as well. Many of the most famous and widely used open source projects are released under the GPL, including linux, gcc and emacs. The second major class are the :term:`BSD` and BSD-style licenses (which includes MIT and the python PSF license). These basically allow you to do whatever you want with the code: ignore it, include it in your own open source project, include it in your proprietary product, sell it, whatever. 
python itself is released under a BSD compatible license, in the sense
that, quoting from the PSF license page:

    There is no GPL-like "copyleft" restriction.  Distributing
    binary-only versions of Python, modified or not, is allowed.  There
    is no requirement to release any of your source code.  You can also
    write extension modules for Python and provide them only in binary
    form.

Famous projects released under a BSD-style license, in the permissive
sense of the last paragraph, are the BSD operating system, python, and
TeX.

I believe the choice of license is an important one, and I advocate a
BSD-style license.  In my experience, the most important commodity an
open source project needs to succeed is users.  Of course, doing
something useful is a prerequisite to getting users, but I also believe
users are something of a prerequisite to doing something useful.  It is
very difficult to design in a vacuum, and users drive good software by
suggesting features and finding bugs.  If you satisfy the needs of some
users, you will inadvertently end up satisfying the needs of a large
class of users.  And users become developers, especially if they have
some skills and find a feature they need implemented, or if they have a
thesis to write.

Once you have a lot of users and a number of developers, a network
effect kicks in, exponentially increasing your users and developers.
In open source parlance, this is sometimes called competing for mind
share.  So I believe the number one (or at least number two) commodity
an open source project can possess is mind share, which means you want
as many damned users using your software as you can get.  Even though
you are giving it away for free, you have to market your software,
promote it, and support it as if you were getting paid for it.

Now, how does this relate to licensing, you are asking?

Most software companies will not use GPL code in their own software,
even those that are highly committed to open source development, such
as enthought_, out of legitimate concern that use of the GPL will
"infect" their code base by its viral nature.  In effect, they want to
retain the right to release some proprietary code.  And in my
experience, companies make for some of the best developers, because
they have the resources to get a job done, even a boring one, if they
need it in their code.  Two of the matplotlib backends (FLTK and WX)
were contributed by private sector companies who are using matplotlib
either internally or in a commercial product -- I doubt these companies
would have been using matplotlib if the code were GPL.

In my experience, the benefits of collaborating with the private sector
are real, whereas the fear that some private company will "steal" your
product and sell it in a proprietary application, leaving you with
nothing, is not.

There is a lot of GPL code in the world, and it is a constant reality
in the development of matplotlib that when we want to reuse some
algorithm, we have to go on a hunt for a non-GPL version.  Most
recently this occurred in a search for a good contouring algorithm.  I
worry that the "license wars", the effects of which are starting to be
felt on many projects, have the potential to do real harm to open
source software development.  There are two unpalatable options: 1) go
with GPL and lose the mind-share of the private sector, or 2) forgo GPL
code and retain the contribution of the private sector.
This is a very tough decision because there is a lot of very high quality software that is GPLd and we need to use it; they don't call the license `viral `_ for nothing. The third option, which is what is motivating me to write this, is to convince people who have released code under the GPL to re-release it under a BSD compatible license. Package authors retain the copyright to their software and have discretion to re-release it under a license of their choosing. Many people choose the GPL when releasing a package because it is the most famous open source license, and did not consider issues such as those raised here when choosing a license. When asked, these developers will often be amenable to re-releasing their code under a more permissive license. Fernando Perez did this with ipython, which was released under the :term:`LGPL` and then re-released under a BSD license to ease integration with matplotlib, scipy and enthought code. The LGPL is more permissive than the GPL, allowing you to link with it non-virally, but many companies are still loathe to use it out of legal concerns, and you cannot reuse LGPL code in a proprietary product. So I encourage you to release your code under a BSD compatible license, and when you encounter an open source developer whose code you want to use, encourage them to do the same. Feel free to forward this document to them. .. include:: ../links_names.txt nipy-0.6.1/doc/faq/licensing.rst000066400000000000000000000057371470056100100165240ustar00rootroot00000000000000.. _licensing: =========== Licensing =========== How do you spell licence? ------------------------- If you are British you spell it differently from Americans, sometimes: http://www.tiscali.co.uk/reference/dictionaries/english/data/d0082350.html As usual the American spelling rule (always use *s*) was less painful and arbitrary, so I (MB) went for that. Why did you choose BSD? ----------------------- We have chosen BSD licensing, for compatibility with SciPy, and to increase input from developers in industry. Wherever possible we will keep packages that can have BSD licensing separate from packages needing a GPL license. Our choices were between: * :term:`BSD` * :term:`GPL` John Hunter made the argument for the BSD license in :ref:`johns-bsd-pitch`, and we agree. Richard Stallman makes the case for the GPL here: http://www.gnu.org/licenses/why-not-lgpl.html How does the BSD license affect our relationship to other projects? ------------------------------------------------------------------- The BSD license allows other projects with virtually any license, including GPL, to use our code. BSD makes it more likely that we will attract support from companies, including open-source software companies, such as Enthought_ and Kitware_. Any part of our code that uses (links to) GPL code, should be in a separable package. Note that we do not have this problem with :term:`LGPL`, which allows us to link without ourselves having a GPL. What license does the NIH prefer? --------------------------------- The NIH asks that software written with NIH money can be commercialized. Quoting from: `NIH NATIONAL CENTERS FOR BIOMEDICAL COMPUTING `_ grant application document: A software dissemination plan must be included in the application. There is no prescribed single license for software produced in this project. However NIH does have goals for software dissemination, and reviewers will be instructed to evaluate the dissemination plan relative to these goals: 1. 
The software should be freely available to biomedical researchers
   and educators in the non-profit sector, such as institutions of
   education, research institutes, and government laboratories.

2. The terms of software availability should permit the
   commercialization of enhanced or customized versions of the
   software, or incorporation of the software or pieces of it into
   other software packages.

There is more discussion of licensing in this `na-mic presentation `_.
See also these links (from the presentation):

* http://www.rosenlaw.com/oslbook.htm
* http://www.opensource.org
* http://wiki.na-mic.org/Wiki/index.php/NAMIC_Wiki:Community_Licensing

So far this might suggest that the NIH would prefer at least a BSD-like
license, but the NIH has supported several GPL'd projects in imaging,
:term:`AFNI` being the most obvious example.

.. include:: ../links_names.txt

nipy-0.6.1/doc/faq/why.rst000066400000000000000000000143761470056100100153510ustar00rootroot00000000000000.. _why-faq:

=========
 Why ...
=========

Why nipy?
---------

We are writing NIPY because we hope that it will solve several problems
in the field at the moment.  We are concentrating on FMRI analysis, so
we'll put the case for that part of neuroimaging for now.

There are several good FMRI analysis packages already - for example
:term:`SPM`, :term:`FSL` and :term:`AFNI`.  For each of these you can
download the source code.  Like SPM, AFNI and FSL, we think source code
is essential for understanding and development.

With these packages you can do many analyses.  Some problems are that:

* The packages don't mix easily.  You'll have to write your own scripts
  to mix between them; this is time-consuming and error-prone, because
  you will need a good understanding of each package.
* Because they don't mix, researchers usually don't try to search out
  the best algorithm for their task - instead they rely on the software
  that they are used to.
* Each package has its own user community, so it's a little more
  difficult to share software and ideas.
* The core development of each package belongs in a single lab.

Another, more general, problem is planning for the future.  We need a
platform that can be the basis for large scale shared development.  For
various reasons, it isn't obvious to us that any of these three is a
good choice for common, shared development.  In particular, we think
that Python is the obvious choice for a large open-source software
project.  By comparison, matlab is not sufficiently general or
well-designed as a programming language, and C / C++ are too hard and
slow for scientific programmers to read or write.  See why-python_ for
this argument in more detail.

We started NIPY because we want to be able to:

* support an open collaborative development environment.  To do this,
  we will have to make our code very easy to understand, modify and
  extend.  If we make our code available, but we are the only people
  who write or extend it, in practice, that is closed software.
* make the tools that allow developers to pick up basic building blocks
  for common tasks such as registration and statistics, and build new
  tools on top.
* write a scripting interface that allows you to mix in routines from
  the other packages that you like or that you think are better than
  the ones we have.
* design ways of interacting with the data and analysis stream that
  help you organize both.  That way you can more easily keep track of
  your analyses.  We also hope this will make analyses easier to run in
  parallel, and therefore much faster.

.. _why-python:

Why python?
-----------

The choice of programming language has many scientific and practical
consequences.  Matlab is an example of a high-level language.
Languages are considered high level if they are able to express a large
amount of functionality per line of code; other examples of high level
languages are Python, Perl, Octave, R and IDL.  In contrast, C is a
low-level language.  Low level languages can achieve higher execution
speed, but at the cost of code that is considerably more difficult to
read.  C++ and Java occupy the middle ground, sharing the advantages
and disadvantages of both levels.

Low level languages are particularly ill-suited for exploratory
scientific computing, because they present a high barrier to access by
scientists who are not specialist programmers.  Low-level code is
difficult to read and write, which slows development
([Prechelt2000ECS]_, [boehm1981]_, [Walston1977MPM]_) and makes it more
difficult to understand the implementation of analysis algorithms.
Ultimately this makes it less likely that scientists will use these
languages for development, as their time for learning a new language or
code base is at a premium.  Low level languages do not usually offer an
interactive command line, making data exploration much more rigid.
Finally, applications written in low level languages tend to have more
bugs, as bugs per line of code is approximately constant across many
languages [brooks78].

In contrast, interpreted, high-level languages tend to have
easy-to-read syntax and the native ability to interact with data
structures and objects with a wide range of built-in functionality.
High level code is designed to be closer to the level of the ideas we
are trying to implement, so the developer spends more time thinking
about what the code does rather than how to write it.  This is
particularly important as it is researchers and scientists who will
serve as the main developers of scientific analysis software.  The fast
development time of high-level programs makes it much easier to test
new ideas with prototypes.  Their interactive nature allows researchers
flexible ways to explore their data.

SPM is written in Matlab, which is a high-level language specialized
for matrix algebra.  Matlab code can be quick to develop and is
relatively easy to read.  However, Matlab is not suitable as a basis
for a large-scale common development environment.  The language is
proprietary and the source code is not available, so researchers do not
have access to core algorithms, making bugs in the core very difficult
to find and fix.  Many scientific developers prefer to write code that
can be freely used on any computer and avoid proprietary languages.
Matlab has structural deficiencies for large projects: it lacks
scalability and is poor at managing the complex data structures needed
for neuroimaging research.  While it has the ability to integrate with
other languages (e.g., C/C++ and FORTRAN), this feature is quite
impoverished.  Furthermore, its memory handling is weak and it lacks
pointers - a major problem for dealing with the very large data
structures that are often needed in neuroimaging.  Matlab is also a
poor choice for many applications such as system tasks, database
programming, web interaction, and parallel computing.  Finally, Matlab
has weak GUI tools, which are crucial to researchers for productive
interactions with their data.

.. [boehm1981] Boehm, Barry W. 1981.  *Software Engineering
   Economics*.  Englewood Cliffs, NJ: Prentice-Hall.

.. [Prechelt2000ECS] Prechelt, Lutz. 2000.
   An Empirical Comparison of Seven Programming Languages.  *IEEE
   Computer* 33, 23--29.

.. [Walston1977MPM] Walston, C E, and C P Felix. 1977.  A Method of
   Programming Measurement and Estimation.  *IBM Syst J* 16, 54--73.

nipy-0.6.1/doc/glossary.rst000066400000000000000000000215461470056100100156400ustar00rootroot00000000000000==========
 Glossary
==========

.. glossary::

   AFNI
      AFNI_ is a functional imaging analysis package.  It is funded by
      the NIMH, based in Bethesda, Maryland, and directed by Robert
      Cox.  Like :term:`FSL`, it is written in C, and it's very common
      to use shell scripting of AFNI command line utilities to automate
      analyses.  Users often describe liking AFNI's scriptability and
      image visualization.  It uses the :term:`GPL` license.

   BSD
      Berkeley software distribution license.  The BSD_ license is
      permissive, in that it allows you to modify and use the code
      without requiring that you use the same license.  It allows you
      to distribute closed-source binaries.

   BOLD
      Contrast that is blood oxygen level dependent.  When a brain area
      becomes active, blood flow increases to that area.  It turns out
      that, with the blood flow increase, there is a change in the
      relative concentrations of oxygenated and deoxygenated
      hemoglobin.  Oxy- and deoxy-hemoglobin have different magnetic
      properties.  This in turn leads to a change in MRI signal that
      can be detected by collecting suitably sensitive MRI images at
      regular short intervals during the blood flow change.  See the
      `Wikipedia FMRI`_ article for more detail.

   BrainVisa
      BrainVISA_ is a sister project to NIPY.  It also uses Python, and
      provides a carefully designed framework and automatic GUI for
      defining image processing workflows.  It has tools to integrate
      command line and other utilities into these workflows.  Its
      particular strength is anatomical image processing, but it also
      supports FMRI and other imaging modalities.  BrainVISA is based
      in `NeuroSpin `_, outside Paris.

   DTI
      Diffusion tensor imaging.  DTI is rather poorly named, because it
      is a model of the diffusion signal, and an analysis method,
      rather than an imaging method.  The simplest and most common
      diffusion tensor model assumes that diffusion direction and
      velocity at every voxel can be modeled by a single tensor - that
      is, by an ellipsoid of regular shape, fully described by the
      lengths and orientations of its three orthogonal axes.  This
      model can easily fail in fairly common situations, such as
      white-matter fiber tract crossings.

   DWI
      Diffusion-weighted imaging.  DWI is the general term for MR
      imaging designed to image diffusion processes.  Sometimes
      researchers use :term:`DTI` with the same meaning, but
      :term:`DTI` is a common DWI signal model and analysis method.

   EEGlab
      The most widely-used open-source package for analyzing
      electro-physiological data.  EEGlab_ is written in :term:`matlab`
      and uses a :term:`GPL` license.

   FMRI
      Functional magnetic resonance imaging.  It refers to MRI image
      acquisitions and analysis designed to look at brain function
      rather than structure.  Most people use FMRI to refer to
      :term:`BOLD` imaging in particular.  See the `Wikipedia FMRI`_
      article for more detail.

   FSL
      FSL_ is the FMRIB_ software library, written by the FMRIB_
      analysis group, and directed by Steve Smith.  Like :term:`AFNI`,
      it is a large collection of C / C++ command line utilities that
      can be scripted with a custom GUI / batch system, or using shell
      scripting.
      Its particular strengths are the analysis of :term:`DWI` data and
      :term:`ICA` functional data analysis, although it has strong
      tools for the standard :term:`SPM approach` to FMRI.  It is free
      for academic use, and open-source, but not free for commercial
      use.

   GPL
      The GPL_ is the GNU general public license.  It is one of the
      most commonly-used open-source software licenses.  The
      distinctive feature of the GPL license is that it requires that
      any code derived from GPL code also uses a GPL license.  It also
      requires that any code that is statically or dynamically linked
      to GPL code has a GPL-compatible license.  See: `Wikipedia GPL
      `_ and ``_.

   ICA
      Independent component analysis is a multivariate technique,
      related to :term:`PCA`, to estimate independent components of
      signal from multiple sensors.  In functional imaging, this
      usually means detecting underlying spatial and temporal
      components within the brain, where the brain voxels can be
      considered to be different sensors of the signal.  See the
      `Wikipedia ICA`_ page.

   LGPL
      The lesser GNU public license.  LGPL_ differs from the
      :term:`GPL` in that you can link to LGPL code from non-LGPL code
      without having to adopt a GPL-compatible license.  However, if
      you modify the code (create a "derivative work"), that
      modification has to be released under the LGPL.  See `Wikipedia
      LGPL `_ for more discussion.

   Matlab
      matlab_ began as a high-level programming language for working
      with matrices.  Over time it has expanded to become a fairly
      general-purpose language.  See also: `Wikipedia MATLAB `_.  It
      has good numerical algorithms, 2D graphics, and documentation.
      There are several large neuroscience software projects written in
      MATLAB, including :term:`SPM software` and :term:`EEGlab`.

   PCA
      Principal component analysis is a multivariate technique to
      determine orthogonal components across multiple sources (or
      sensors).  See :term:`ICA` and the `Wikipedia PCA`_ page.

   PET
      Positron emission tomography is a method of detecting the spatial
      distributions of certain radio-labeled compounds - usually in the
      brain.  The scanner detectors pick up the spatial distribution of
      emitted radiation from within the body.  From this pattern, it is
      possible to reconstruct the distribution of radioactivity in the
      body, using techniques such as filtered back projection.  PET was
      the first mainstream technique used for detecting regional
      changes in blood flow as an index of which brain areas were
      active while the subject was doing various tasks, or at rest.
      These studies nearly all used :term:`water activation PET`.  See
      the `Wikipedia PET`_ entry.

   SPM
      SPM (statistical parametric mapping) refers either to the
      :term:`SPM approach` to analysis or the :term:`SPM software`
      package.

   SPM approach
      Statistical parametric mapping is a way of analyzing data that
      involves creating an image (the *map*) containing statistics, and
      then doing tests on this statistic image.  For example, we often
      create a t statistic image where each :term:`voxel` contains a t
      statistic value for the time-series from that voxel.  The
      :term:`SPM software` package implements this approach - as do
      several others, including :term:`FSL` and :term:`AFNI`.

   SPM software
      SPM_ (statistical parametric mapping) is the name of the matlab_
      based package written by John Ashburner, Karl Friston and others
      at the `Functional Imaging Laboratory`_ in London.  More people
      use the SPM package to analyze :term:`FMRI` and :term:`PET` data
      than any other.
      It has good lab and community support, and the :term:`matlab`
      source code is available under the :term:`GPL` license.

   VoxBo
      Quoting from the Voxbo_ webpage - "VoxBo is a software package
      for the processing, analysis, and display of data from functional
      neuroimaging experiments".  Like :term:`SPM`, :term:`FSL` and
      :term:`AFNI`, VoxBo provides algorithms for a full FMRI analysis,
      including statistics.  It also provides software for
      lesion-symptom analysis, and has a parallel scripting engine.
      VoxBo has a :term:`GPL` license.  Dan Kimberg leads development.

   voxel
      Voxels are volumetric pixels - that is, they are values in a
      regular grid in three dimensional space - see the `Wikipedia
      voxel `_ entry.

   water activation PET
      A :term:`PET` technique to detect regional changes in blood flow.
      Before each scan, we inject the subject with radio-labeled water.
      The radio-labeled water reaches the arterial blood, and then
      distributes (to some extent) in the brain.  The concentration of
      radioactive water increases in brain areas with higher blood
      flow.  Thus, the image of estimated counts in the brain has an
      intensity that is influenced by blood flow.  This use has been
      almost completely replaced by the less invasive :term:`BOLD`
      :term:`FMRI` technique.

.. include:: links_names.txt

nipy-0.6.1/doc/history.rst000066400000000000000000000024661470056100100154760ustar00rootroot00000000000000#################
A history of NIPY
#################

Sometime around 2002, Jonathan Taylor started writing BrainSTAT, a
Python version of Keith Worsley's FmriSTAT package.

In 2004, Jarrod Millman and Matthew Brett decided that they wanted to
write a grant to build a new neuroimaging analysis package in Python.
Soon afterwards, they found that Jonathan had already started, and
merged efforts.  At first we called this project *BrainPy*.  Later we
changed the name to NIPY.

In 2005, Jarrod, Matthew and Jonathan, along with Mark D'Esposito,
Fernando Perez, John Hunter, Jean-Baptiste Poline, and Tom Nichols,
submitted the first NIPY grant to the NIH.  It was not successful.

In 2006, Jarrod and Mark submitted a second grant, based on the first.
The NIH gave us 3 years of funding for two programmers.  We hired two
programmers in 2007 - Christopher Burns and Tom Waite - and began work
on refactoring the code.

Meanwhile, the team at Neurospin, Paris, started to refactor their FFF
code to work better with Python and NIPY.  This work was by Alexis
Roche, Bertrand Thirion, and Benjamin Thyreau, with some help and
advice from Fernando Perez.

In 2008, Fernando Perez and Matthew Brett started work full-time at the
UC Berkeley `Brain Imaging Center `_.  Matthew in particular came to
work on NIPY.

nipy-0.6.1/doc/index.rst000066400000000000000000000012441470056100100150760ustar00rootroot00000000000000.. _about_nipy:

====
NIPY
====

NIPY is a python project for analysis of structural and functional
neuroimaging data.

Please see our :ref:`documentation-main` and feel free to hold us to
the high ideals of :ref:`nipy-mission`.

*The NIPY team*

.. We need the following toctree directive to include the documentation
.. in the document hierarchy - see http://sphinx.pocoo.org/concepts.html
.. toctree::
   :hidden:

   documentation
   devel/code_discussions/coordmap_notes
   devel/guidelines/compiling_windows
   devel/images
   devel/install/windows_scipy_build
   faq/johns_bsd_pitch
   references/brainpy_abstract
   users/install_data
   users/math_coordmap

nipy-0.6.1/doc/labs/000077500000000000000000000000001470056100100141555ustar00rootroot00000000000000nipy-0.6.1/doc/labs/datasets.rst000066400000000000000000000127141470056100100165240ustar00rootroot00000000000000
=============================
 Volumetric data structures
=============================

Volumetric data structures expose numerical values embedded in a world
space.  For instance, a volume could expose the T1 intensity, as
acquired in scanner space, or the BOLD signal in MNI152 template space.
The values can be multi-dimensional: in the case of a BOLD signal, the
fMRI signal would correspond to a time series at each position in world
space.

.. currentmodule:: nipy.labs.datasets.volumes.volume_img

The image structure: :class:`VolumeImg`
=======================================

The structure most often used in neuroimaging is the
:class:`VolumeImg`.  It corresponds, for instance, to the structure
used in Nifti files.  This structure stores data as an n-dimensional
array, with n being at least 3, along with the necessary information to
map it to world space.

:definition:
   A volume-image (class: :class:`VolumeImg`) is a volumetric data
   structure given by data points lying on a regular grid: this
   structure is a generalization of an image in 3D.  The voxels,
   vertices of the grid, are mapped to coordinates by an affine
   transformation.  As a result, the grid is regular and evenly-spaced,
   but may not be orthogonal, and the spacing may differ in the 3
   directions.

   .. image:: datasets/volume_img.jpg

The data is exposed in a multi-dimensional array, with the first 3 axes
corresponding to spatial directions.  A complete description of this
object can be found on the page: :class:`VolumeImg`.

Useful methods on volume structures
====================================

.. currentmodule:: nipy.labs.datasets.volumes.volume_field

Any general volume structure will implement methods for querying the
values and changing world space (see the :class:`VolumeField`
documentation for more details):

.. autosummary::
   :toctree: generated

   VolumeField.values_in_world
   VolumeField.composed_with_transform

Also, as volume structures may describe the spatial data in various
ways, you can easily convert to a :class:`VolumeImg`, i.e. a regular
grid, for instance to implement an algorithm on the grid, such as
spatial smoothing:

.. autosummary::
   :toctree: generated

   VolumeField.as_volume_img

Finally, different structures can embed the data differently in the
same world space, for instance with different resolutions.  You can
resample one structure on another using:

.. autosummary::
   :toctree: generated

   VolumeField.resampled_to_img

**FIXME:** Examples would be good here, but first we need io and
template data to be wired with datasets.

More general data structures
===============================

.. currentmodule:: nipy.labs.datasets.volumes.volume_img

The :class:`VolumeImg` is the most commonly found volume structure, and
the simplest to understand; however, volumetric data can be described
in more generic terms, and for performance reasons it might be
interesting to use other objects.  Here, we give a list of the nipy
volumetric data structures, from most specific to most general.
When you deal with volume structures in your algorithms, depending on
which volume structure class you are taking as an input, you can assume
different properties of the data.  You can always use
:meth:`VolumeImg.as_volume_img` to cast the volume structure into a
:class:`VolumeImg`, which is simple to understand and easy to work
with, but it may not be necessary.

Implemented classes
--------------------

Implemented classes (or `concrete` classes) are structures that you can
readily use directly from nipy.

.. currentmodule:: nipy.labs.datasets.volumes.volume_grid

:class:`VolumeGrid`
   In a :class:`VolumeGrid`, the data points are sampled on a 3D grid,
   but unlike for a :class:`VolumeImg`, the grid may not be regular.
   For instance, it can be a grid that has been warped by a non-affine
   transformation.  As with the :class:`VolumeImg`, the data is exposed
   in a multi-dimensional array, with the first 3 axes corresponding to
   spatial directions.

   .. image:: datasets/volume_grid.jpg

Abstract classes
------------------

.. currentmodule:: nipy.labs.datasets.volumes.volume_data

Abstract classes cannot be used because they are incompletely
implemented.  They serve to define the interface: the type of objects
that you can use, and how you can extend nipy by exposing the same set
of methods and attributes (the `interface`).

:class:`VolumeData`
   In this volumetric structure, the data is sampled for some points in
   the world space.  The object knows how to interpolate between these
   points.  The underlying values are stored in a multidimensional
   array-like object that can be indexed and sliced.

   .. image:: datasets/volume_data.jpg

   This is an abstract base class: it defines an interface, but is not
   fully functional, and can be used only via its child classes (such
   as :class:`VolumeGrid` or :class:`VolumeImg`).

.. currentmodule:: nipy.labs.datasets.volumes.volume_field

:class:`VolumeField`
   This is the most general volumetric structure (base class): all the
   nipy volumes expose this interface.  This structure does not make
   any assumptions about how the values are internally represented;
   they may, for instance, be represented as a function, rather than as
   data points, or as a data structure that is not an array, such as a
   graph.

   .. image:: datasets/volume_field.jpg

   This is also an abstract base class: it defines the core nipy
   volumetric data structure interface; you can rely on all the methods
   documented for this class in any nipy data structure.
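As a small illustration of this interface (a minimal sketch only: the
constructor arguments shown for :class:`VolumeImg` - data, affine,
world space name - follow the current class signature, and the world
space label used here is arbitrary)::

    import numpy as np

    from nipy.labs.datasets.volumes.volume_img import VolumeImg

    # A 3D block of data on a regular grid, with 2 mm isotropic voxels.
    data = np.random.random((10, 10, 10))
    affine = np.diag([2., 2., 2., 1.])
    img = VolumeImg(data, affine, 'mine')

    # Interpolate values at arbitrary positions in world space.
    x, y, z = np.array([3.5]), np.array([4.0]), np.array([4.2])
    values = img.values_in_world(x, y, z)

    # Resample onto a finer regular grid (1 mm voxels).
    finer_img = img.as_volume_img(affine=np.diag([1., 1., 1., 1.]))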
nipy-0.6.1/doc/labs/datasets/000077500000000000000000000000001470056100100157655ustar00rootroot00000000000000nipy-0.6.1/doc/labs/datasets/viz_volume_data.py000066400000000000000000000012251470056100100215270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeData
"""

import numpy as np
from enthought.mayavi import mlab

x, y, z, s = np.random.random((4, 20))

mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()

# Scatter random sample points and build an unstructured mesh from them.
src = mlab.pipeline.scalar_scatter(x, y, z, s)
sgrid = mlab.pipeline.delaunay3d(src)

mlab.pipeline.surface(sgrid, opacity=0.4)
mlab.pipeline.surface(mlab.pipeline.extract_edges(sgrid), color=(0, 0, 0))
mlab.pipeline.glyph(sgrid, mode='cube', scale_factor=0.05, scale_mode='none')
mlab.savefig('volume_data.jpg')
mlab.show()
nipy-0.6.1/doc/labs/datasets/viz_volume_field.py000066400000000000000000000012131470056100100216760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeField
"""

import numpy as np
from enthought.mayavi import mlab

s = np.random.random((5, 5, 5))

# Put the side at 0
s[0, ...] = 0
s[-1, ...] = 0
s[:, 0, :] = 0
s[:, -1, :] = 0
s[..., 0] = 0
s[..., -1] = 0

mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()
src = mlab.pipeline.scalar_field(s)

mlab.pipeline.volume(src, vmin=0, vmax=0.9)

# We save as a different filename than the one used, as we modify the
# curves.
mlab.savefig('volume_field_raw.jpg')
mlab.show()
nipy-0.6.1/doc/labs/datasets/viz_volume_grid.py000066400000000000000000000017571470056100100215510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeGrid
"""

import numpy as np
from enthought.mayavi import mlab
from enthought.tvtk.api import tvtk

dims = (4, 4, 4)
x, y, z = np.mgrid[0.:dims[0], 0:dims[1], 0:dims[2]]
x = np.reshape(x.T, (-1,))
y = np.reshape(y.T, (-1,))
z = np.reshape(z.T, (-1,))

# Warp the grid with non-affine transformations.
y += 0.3*np.sin(x)
z += 0.4*np.cos(x)
x += 0.05*y**3

sgrid = tvtk.StructuredGrid(dimensions=(dims[0], dims[1], dims[2]))
sgrid.points = np.c_[x, y, z]
s = np.random.random(dims[0]*dims[1]*dims[2])
sgrid.point_data.scalars = np.ravel(s.copy())
sgrid.point_data.scalars.name = 'scalars'

mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()

mlab.pipeline.surface(sgrid, opacity=0.4)
mlab.pipeline.surface(mlab.pipeline.extract_edges(sgrid), color=(0, 0, 0))
mlab.pipeline.glyph(sgrid, mode='cube', scale_factor=0.2, scale_mode='none')
mlab.savefig('volume_grid.jpg')
mlab.show()
nipy-0.6.1/doc/labs/datasets/viz_volume_img.py000066400000000000000000000013001470056100100213660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeImg
"""

import numpy as np
from enthought.mayavi import mlab

rand = np.random.RandomState(1)
data = rand.random_sample((5, 4, 4))

mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()
src = mlab.pipeline.scalar_field(data)
# Non-isotropic voxel spacing, as allowed by the VolumeImg affine.
src.image_data.spacing = (0.5, 1, 0.7)
src.image_data.update_data()

mlab.pipeline.surface(src, opacity=0.4)
mlab.pipeline.surface(mlab.pipeline.extract_edges(src), color=(0, 0, 0))
mlab.pipeline.glyph(src, mode='cube',
nipy-0.6.1/doc/labs/datasets/volume_data.jpg  [binary JPEG image data omitted]

nipy-0.6.1/doc/labs/datasets/volume_grid.jpg  [binary JPEG image data omitted]

nipy-0.6.1/doc/labs/datasets/volume_img.jpg  [binary JPEG image data omitted]
[tail of an RST source file whose tar header was lost in the image data above]

.. autosummary::
   :toctree: generated

   affine_img_src

nipy-0.6.1/doc/license.rst

.. _nipy-license:

========================
NIPY License Information
========================

.. _nipy-software-license:

Software License
-----------------

Except where otherwise noted, all NIPY software is licensed under a
`revised BSD license <http://www.opensource.org/licenses/bsd-license.php>`_.

See our :ref:`licensing` page for more details.

.. _nipy-documentation-license:

Documentation License
---------------------

Except where otherwise noted, all NIPY documentation is licensed under a
`Creative Commons Attribution 3.0 License
<http://creativecommons.org/licenses/by/3.0/>`_.

All code fragments in the documentation are licensed under our software
license.
nipy-0.6.1/doc/links_names.txt

.. -*- rst -*-
.. vim: ft=rst

.. This rst format file contains commonly used link targets and name
   substitutions.  It may be included in many files, therefore it should
   only contain link targets and name substitutions.  Try grepping for
   "^\.\. _" to find plausible candidates for this list.

.. NOTE: reST targets are
   __not_case_sensitive__, so only one target definition is needed for
   nipy, NIPY, Nipy, etc...

.. Nipy
.. _nipy: http://nipy.org/nipy
.. _`NIPY developer resources`: http://nipy.sourceforge.net/devel
.. _`NIPY data packages`: http://nipy.sourceforge.net/data-packages
.. _`nipy github`: http://github.com/nipy/nipy
.. _`nipy trunk`: http://github.com/nipy/nipy
.. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
.. _nipy pypi: http://pypi.python.org/pypi/nipy
.. _nipy issues: http://github.com/nipy/nipy/issues
.. _`nipy bugs`: http://github.com/nipy/nipy/issues
.. _`nipy sourceforge`: http://nipy.sourceforge.net/
.. _`nipy launchpad`: https://launchpad.net/nipy
.. _nipy on travis: https://travis-ci.org/nipy/nipy

.. Related projects
.. _nipy community: http://nipy.org
.. _dipy: http://nipy.org/dipy
.. _`dipy github`: http://github.com/Garyfallidis/dipy
.. _nibabel: http://nipy.org/nibabel
.. _`nibabel github`: http://github.com/nipy/nibabel
.. _nipy development guidelines: http://nipy.org/devel
.. _nipy buildbot: http://nipy.bic.berkeley.edu

.. Documentation tools
.. _graphviz: http://www.graphviz.org/
.. _Sphinx: http://sphinx.pocoo.org/
.. _`Sphinx reST`: http://sphinx.pocoo.org/rest.html
.. _reST: http://docutils.sourceforge.net/rst.html
.. _docutils: http://docutils.sourceforge.net

.. Licenses
.. _GPL: http://www.gnu.org/licenses/gpl.html
.. _BSD: http://www.opensource.org/licenses/bsd-license.php
.. _LGPL: http://www.gnu.org/copyleft/lesser.html
.. _MIT License: http://www.opensource.org/licenses/mit-license.php

.. Operating systems and distributions
.. _Debian: http://www.debian.org
.. _NeuroDebian: http://neuro.debian.net
.. _Ubuntu: http://www.ubuntu.com
.. _MacPorts: http://www.macports.org/

.. Working process
.. _pynifti: http://niftilib.sourceforge.net/pynifti/
.. _nifticlibs: http://nifti.nimh.nih.gov
.. _nifti: http://nifti.nimh.nih.gov
.. _sourceforge: http://nipy.sourceforge.net/
.. _github: http://github.com
.. _launchpad: https://launchpad.net/

.. Python packaging
.. _distutils: http://docs.python.org/2/library/distutils.html
.. _setuptools: http://pypi.python.org/pypi/setuptools
.. _distribute: http://pypi.python.org/pypi/distribute
.. _pip: http://pypi.python.org/pypi/pip
.. _old and new python versions: https://launchpad.net/%7Efkrull/+archive/deadsnakes
.. _pypi: http://pypi.python.org
.. _example pypi: http://packages.python.org/an_example_pypi_project/setuptools.html#intermezzo-pypirc-file-and-gpg
.. _github bdist_mpkg: https://github.com/matthew-brett/bdist_mpkg
.. _wheel: https://pypi.python.org/pypi/wheel
.. _install pip with get-pip.py: https://pip.pypa.io/en/stable/installing/#installing-with-get-pip-py

.. Code support stuff
.. _pychecker: http://pychecker.sourceforge.net/
.. _pylint: http://www.logilab.org/project/pylint
.. _pyflakes: http://divmod.org/trac/wiki/DivmodPyflakes
.. _virtualenv: http://pypi.python.org/pypi/virtualenv
.. _git: https://git-scm.com
.. _flymake: http://flymake.sourceforge.net/
.. _rope: http://rope.sourceforge.net/
.. _pymacs: http://pymacs.progiciels-bpi.ca/pymacs.html
.. _ropemacs: http://rope.sourceforge.net/ropemacs.html
.. _ECB: http://ecb.sourceforge.net/
.. _emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode
.. _doctest-mode: http://www.cis.upenn.edu/~edloper/projects/doctestmode/
.. _bazaar: http://bazaar-vcs.org/
.. _subversion: http://subversion.tigris.org/
.. _nose: http://nose.readthedocs.org/en/latest
.. _pytest: https://pytest.org
.. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html

.. Other python projects
.. _numpy: http://numpy.scipy.org
.. _scipy: http://www.scipy.org
.. _cython: http://www.cython.org/
.. _ipython: http://ipython.org
.. _`ipython manual`: http://ipython.org/ipython-doc/stable/index.html
.. _matplotlib: http://matplotlib.sourceforge.net
.. _ETS: http://code.enthought.com/projects/tool-suite.php
.. _`Enthought Tool Suite`: http://code.enthought.com/projects/tool-suite.php
.. _python: http://www.python.org
.. _mayavi: http://code.enthought.com/projects/mayavi/
.. _sympy: http://sympy.org
.. _networkx: http://networkx.lanl.gov/
.. _pythonxy:
.. _python (x, y): https://python-xy.github.io/
.. _EPD: http://www.enthought.com/products/epd.php
.. _EPD free: http://www.enthought.com/products/epd_free.php
.. _Anaconda: https://www.continuum.io/downloads
.. _Unofficial Windows binaries: http://www.lfd.uci.edu/~gohlke/pythonlibs

.. Python imaging projects
.. _PyMVPA: http://www.pymvpa.org
.. _BrainVISA: http://brainvisa.info
.. _anatomist: http://brainvisa.info

.. Not so python imaging projects
.. _matlab: http://www.mathworks.com
.. _spm: http://www.fil.ion.ucl.ac.uk/spm
.. _eeglab: http://sccn.ucsd.edu/eeglab
.. _AFNI: http://afni.nimh.nih.gov/afni
.. _FSL: http://www.fmrib.ox.ac.uk/fsl
.. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu
.. _voxbo: http://www.voxbo.org
.. _fmristat: http://www.math.mcgill.ca/keith/fmristat

.. Visualization
.. _vtk: http://www.vtk.org/

.. General software
.. _gcc: http://gcc.gnu.org
.. _xcode: http://developer.apple.com/TOOLS/xcode
.. _mingw: http://www.mingw.org
.. _cygwin: http://cygwin.com
.. _macports: http://www.macports.org/
.. _VTK: http://www.vtk.org/
.. _ITK: http://www.itk.org/
.. _swig: http://www.swig.org

.. Functional imaging labs
.. _`Brain Imaging Center`: http://bic.berkeley.edu/
.. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk
.. _FMRIB: http://www.fmrib.ox.ac.uk

.. Other organizations
.. _enthought: http://www.enthought.com
.. _kitware: http://www.kitware.com

.. General information links
.. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging
.. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography
.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm
.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/
.. _MINC: http://wiki.bic.mni.mcgill.ca/index.php/MINC

.. Mathematical methods
.. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis
.. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis

.. Testing
.. _travis-ci: https://travis-ci.org

.. People
.. _Matthew Brett: https://matthew.dynevor.org
.. _Yaroslav O. Halchenko: http://www.onerussian.com
.. _Michael Hanke: http://apsy.gse.uni-magdeburg.de/hanke
.. _Gaël Varoquaux: http://gael-varoquaux.info/
.. _Keith Worsley: http://www.math.mcgill.ca/keith
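The header comment in links_names.txt suggests grepping for ``^\.\. _`` to
audit the target list.  A throwaway Python equivalent of that check -- the
snippet below is illustrative only, not part of nipy, and its report format
is made up::

    # Scan links_names.txt for reST link-target definitions, flagging
    # indirect targets (a name with no URL, resolved by the next target,
    # as with the pythonxy entry above).
    import re

    target_re = re.compile(r'^\.\. _(`?[^`:]+`?):\s*(\S+)?\s*$', re.M)
    with open('links_names.txt') as fobj:
        text = fobj.read()
    for name, url in target_re.findall(text):
        print(f'{name.strip("`")} -> {url or "(indirect target)"}')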
_`Brain Imaging Center`: http://bic.berkeley.edu/ .. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk .. _FMRIB: http://www.fmrib.ox.ac.uk .. Other organizations .. _enthought: .. _kitware: http://www.kitware.com .. General information links .. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging .. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography .. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ .. _MINC: http://wiki.bic.mni.mcgill.ca/index.php/MINC .. Mathematical methods .. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis .. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis .. Testing .. _travis-ci: https://travis-ci.org .. People .. _Matthew Brett: https://matthew.dynevor.org .. _Yaroslav O. Halchenko: http://www.onerussian.com .. _Michael Hanke: http://apsy.gse.uni-magdeburg.de/hanke .. _Gaël Varoquaux: http://gael-varoquaux.info/ .. _Keith Worsley: http://www.math.mcgill.ca/keith nipy-0.6.1/doc/mission.rst000066400000000000000000000001711470056100100154460ustar00rootroot00000000000000.. _nipy-mission: =================== What is NIPY for? =================== .. include:: mission.txt *The NIPY team* nipy-0.6.1/doc/mission.txt000066400000000000000000000011651470056100100154610ustar00rootroot00000000000000The purpose of NIPY is to make it easier to do better brain imaging research. We believe that neuroscience ideas and analysis ideas develop together. Good ideas come from understanding; understanding comes from clarity, and clarity must come from well-designed teaching materials and well-designed software. The software must be designed as a natural extension of the underlying ideas. We aim to build software that is: * clearly written * clearly explained * a good fit for the underlying ideas * a natural home for collaboration We hope that, if we fail to do this, you will let us know. We will try and make it better. nipy-0.6.1/doc/publications.rst000066400000000000000000000012041470056100100164570ustar00rootroot00000000000000============ Publications ============ Peer-reviewed Publications -------------------------- K. Jarrod Millman, M. Brett, `"Analysis of Functional Magnetic Resonance Imaging in Python," `_ Computing in Science and Engineering, vol. 9, no. 3, pp. 52-55, May/June, 2007. Posters ------- Taylor JE, Worsley K, Brett M, Cointepas Y, Hunter J, Millman KJ, Poline J-B, Perez F. “BrainPy: an open source environment for the analysis and visualization of human brain data.” Meeting of the Organization for Human Brain Mapping, 2005. See the :ref:`BrainPy HBM abstract `. nipy-0.6.1/doc/references/000077500000000000000000000000001470056100100153555ustar00rootroot00000000000000nipy-0.6.1/doc/references/brainpy_abstract.rst000066400000000000000000000060441470056100100214420ustar00rootroot00000000000000.. _brainpy-hbm-abstract: ============================ BrainPy HBM abstract, 2005 ============================ This is the abstract describing the BrainPy / NIPY project from the `HBM2005 `_ conference. BrainPy: an open source environment for the analysis and visualization of human brain data ========================================================================================== Jonathan Taylor (1), Keith Worsley (2), Matthew Brett (3), Yann Cointepas (4), John Hunter (5), Jarrod Millman (3), Jean-Baptiste Poline (4), Fernando Perez (6) 1. Dept. of Statistics, Stanford University, U.S.A. 
2. Dept. of Mathematics and Statistics, McGill University, Canada 3. Department of Neuroscience, University of California, Berkeley, U.S.A. 4. Service Hospitalier Frédéric Joliot, France 5. Complex Systems Laboratory, University of Chicago, U.S.A. 6. Department of Applied Mathematics, University of Colorado at Boulder, U.S.A. Objective --------- What follows are the goals of BrainPy, a multi-center project to provide an open source environment for the analysis and visualization of human brain data built on top of python. While the project is still in its initial stages, packages for file I/O, script support as well as single subject fMRI and random effects group comparison models are currently available. Methods ------- Scientific computing has evolved over the last two decades in two broad directions. One, there has been a movement to the use of high-level interface languages that glue existing high-performance libraries into an accessible, scripted, interactive environment, e.g. IDL, matlab. Two, there has been a shift to open algorithms and software, because this development process leads to better code, and because it is more consistent with the scientific method. Results & Discussion -------------------- The proposed environment includes the following: * We intend to provide users with an open source environment which is interoperable with current packages such as SPM and AFNI, both at a file I/O level and, where possible, interactively (e.g. pymat -- calling matlab/SPM from python). * Read/write/conversion support for all major imaging formats and packages (SPM/ANALYZE, :term:`FSL`, :term:`AFNI`, MINC, NIFTI, and :term:`VoxBo`) * Low-level access to data through an interactive shell, which is important for developing new analysis methods, as well as high-level access through GUIs for specialized tasks using standard python tools. * Visualization of results using pre-existing tools such as :term:`BrainVisa`, as well as support for development of new tools using VTK. * Support for MATLAB style numeric packages (Numarray) and plotting (matplotlib_). * Support for EEG analysis including EEG/MEG/fMRI fusion analysis. * Support for spatio-temporal wavelet analysis (`PhiWave `_) Conclusions ----------- BrainPy is an open-source environment for the analysis and visualization of neuroimaging data built on top of python. .. include:: ../links_names.txt nipy-0.6.1/doc/sphinxext/000077500000000000000000000000001470056100100152665ustar00rootroot00000000000000nipy-0.6.1/doc/sphinxext/README.txt000066400000000000000000000006341470056100100167670ustar00rootroot00000000000000=================== Sphinx Extensions =================== These are a few sphinx extensions we are using to build the nipy documentation. In this file we list where they each come from, since we intend to always push back upstream any modifications or improvements we make to them. * From matplotlib: * inheritance_diagram.py * From numpy: * numpy_ext * From ipython: * ipython_console_highlighting nipy-0.6.1/doc/sphinxext/autosummary_generate.py000077500000000000000000000166161470056100100221150ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: r""" autosummary_generate.py OPTIONS FILES Generate automatic RST source files for items referred to in autosummary:: directives. Each generated RST file contains a single auto*:: directive which extracts the docstring of the referred item. 
Example Makefile rule:: generate: ./ext/autosummary_generate.py -o source/generated source/*.rst """ import glob import inspect import optparse import os import pydoc import re from autosummary import import_by_name try: from phantom_import import import_phantom_module except ImportError: import_phantom_module = lambda x: x def main(): p = optparse.OptionParser(__doc__.strip()) p.add_option("-p", "--phantom", action="store", type="string", dest="phantom", default=None, help="Phantom import modules from a file") p.add_option("-o", "--output-dir", action="store", type="string", dest="output_dir", default=None, help=("Write all output files to the given directory (instead " "of writing them as specified in the autosummary:: " "directives)")) options, args = p.parse_args() if len(args) == 0: p.error("wrong number of arguments") if options.phantom and os.path.isfile(options.phantom): import_phantom_module(options.phantom) # read names = {} for name, loc in get_documented(args).items(): for (filename, sec_title, keyword, toctree) in loc: if toctree is not None: path = os.path.join(os.path.dirname(filename), toctree) names[name] = os.path.abspath(path) # write for name, path in sorted(names.items()): if options.output_dir is not None: path = options.output_dir if not os.path.isdir(path): os.makedirs(path) try: obj, name = import_by_name(name) except ImportError as e: print(f"Failed to import '{name}': {e}") continue fn = os.path.join(path, f'{name}.rst') if os.path.exists(fn): # skip continue f = open(fn, 'w') try: f.write('{}\n{}\n\n'.format(name, '='*len(name))) if inspect.isclass(obj): if issubclass(obj, Exception): f.write(format_modulemember(name, 'autoexception')) else: f.write(format_modulemember(name, 'autoclass')) elif inspect.ismodule(obj): f.write(format_modulemember(name, 'automodule')) elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): f.write(format_classmember(name, 'automethod')) elif callable(obj): f.write(format_modulemember(name, 'autofunction')) elif hasattr(obj, '__get__'): f.write(format_classmember(name, 'autoattribute')) else: f.write(format_modulemember(name, 'autofunction')) finally: f.close() def format_modulemember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-1]), parts[-1] return f".. currentmodule:: {mod}\n\n.. {directive}:: {name}\n" def format_classmember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) return f".. currentmodule:: {mod}\n\n.. {directive}:: {name}\n" def get_documented(filenames): """ Find out what items are documented in source/*.rst See `get_documented_in_lines`. """ documented = {} for filename in filenames: f = open(filename) lines = f.read().splitlines() documented.update(get_documented_in_lines(lines, filename=filename)) f.close() return documented def get_documented_in_docstring(name, module=None, filename=None): """ Find out what items are documented in the given object's docstring. See `get_documented_in_lines`. """ try: obj, real_name = import_by_name(name) lines = pydoc.getdoc(obj).splitlines() return get_documented_in_lines(lines, module=name, filename=filename) except AttributeError: pass except ImportError as e: print(f"Failed to import '{name}': {e}") return {} def get_documented_in_lines(lines, module=None, filename=None): """ Find out what items are documented in the given lines Returns ------- documented : dict of list of (filename, title, keyword, toctree) Dictionary whose keys are documented names of objects. 
The value is a list of locations where the object was documented. Each location is a tuple of filename, the current section title, the name of the directive, and the value of the :toctree: argument (if present) of the directive. """ title_underline_re = re.compile(r"^[-=*_^#]{3,}\s*$") autodoc_re = re.compile(r"\.\. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?') toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') documented = {} current_title = [] last_line = None toctree = None current_module = module in_autosummary = False for line in lines: try: if in_autosummary: m = toctree_arg_re.match(line) if m: toctree = m.group(1) continue if line.strip().startswith(':'): continue # skip options m = autosummary_item_re.match(line) if m: name = m.group(1).strip() if current_module and not name.startswith(current_module + '.'): name = f"{current_module}.{name}" documented.setdefault(name, []).append( (filename, current_title, 'autosummary', toctree)) continue if line.strip() == '': continue in_autosummary = False m = autosummary_re.match(line) if m: in_autosummary = True continue m = autodoc_re.search(line) if m: name = m.group(2).strip() if m.group(1) == "module": current_module = name documented.update(get_documented_in_docstring( name, filename=filename)) elif current_module and not name.startswith(current_module+'.'): name = f"{current_module}.{name}" documented.setdefault(name, []).append( (filename, current_title, "auto" + m.group(1), None)) continue m = title_underline_re.match(line) if m and last_line: current_title = last_line.strip() continue m = module_re.match(line) if m: current_module = m.group(2) continue finally: last_line = line return documented if __name__ == "__main__": main() nipy-0.6.1/doc/users/000077500000000000000000000000001470056100100143755ustar00rootroot00000000000000nipy-0.6.1/doc/users/basic_io.rst000066400000000000000000000044471470056100100167060ustar00rootroot00000000000000.. _basic_data_io: =============== Basic Data IO =============== Accessing images using nipy: while Nifti_ is the primary file format, Analyze images (with an associated .mat file) and MINC files can also be read. Load Image from File ==================== Get a filename for an example file. ``anatfile`` gives a filename for a small testing image in the nipy distribution: >>> from nipy.testing import anatfile Load the file from disk: >>> from nipy import load_image >>> myimg = load_image(anatfile) >>> myimg.shape (33, 41, 25) >>> myimg.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) Access Data into an Array ========================= This allows the user to access data as a numpy array. >>> mydata = myimg.get_fdata() >>> mydata.shape (33, 41, 25) >>> mydata.ndim 3 Save image to a File ==================== >>> from nipy import save_image >>> newimg = save_image(myimg, 'newmyfile.nii') Create Image from an Array =========================== This will have a generic affine-type CoordinateMap with unit voxel sizes. >>> import numpy as np >>> from nipy.core.api import Image, vox2mni >>> rawarray = np.zeros((43,128,128)) >>> arr_img = Image(rawarray, vox2mni(np.eye(4))) >>> arr_img.shape (43, 128, 128) Coordinate map ============== Images have a Coordinate Map.
The Coordinate Map contains information defining the input (domain) and output (range) Coordinate Systems of the image, and the mapping between the two Coordinate systems. The *input* coordinate system is the *voxel* coordinate system, and the *output* coordinate system is the *world* coordinate system. >>> newimg.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S'), name='aligned', coord_dtype=float64), affine=array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) ) See :ref:`coordinate_map` for more detail. .. testcleanup:: # Delete the file we wrote out further up the page. import os os.unlink('newmyfile.nii') .. include:: ../links_names.txt nipy-0.6.1/doc/users/coordinate_map.rst000066400000000000000000000142621470056100100201200ustar00rootroot00000000000000.. _coordinate_map: ############################# Basics of the Coordinate Map ############################# When you load an image it will have an associated Coordinate Map. **Coordinate Map** The Coordinate Map contains information defining the input (domain) and output (range) Coordinate Systems of the image, and the mapping between the two Coordinate systems. The *input* or *domain* in an image are voxel coordinates in the image array. The *output* or *range* are the millimetre coordinates in some space, that correspond to the input (voxel) coordinates. >>> import nipy Get a filename for an example file: >>> from nipy.testing import anatfile Get the coordinate map for the image: >>> anat_img = nipy.load_image(anatfile) >>> coordmap = anat_img.coordmap For more on Coordinate Systems and their properties, see :mod:`nipy.core.reference.coordinate_system`. You can inspect a coordinate map:: >>> coordmap.function_domain.coord_names ('i', 'j', 'k') >>> coordmap.function_range.coord_names ('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S') >>> coordmap.function_domain.name 'voxels' >>> coordmap.function_range.name 'aligned' A Coordinate Map has a mapping from the *input* Coordinate System to the *output* Coordinate System. Here we can see we have a voxel to millimeter mapping from the voxel space (i,j,k) to the millimeter space (x,y,z). We can also get the name of the respective Coordinate Systems that our Coordinate Map maps between. A Coordinate Map is two Coordinate Systems with a mapping between them. Formally the mapping is a function that takes points from the input Coordinate System and returns points from the output Coordinate System. This is the same as saying that the mapping takes points in the mapping function *domain* and transforms them to points in the mapping function *range*. Often this is as simple as applying an Affine transform. In that case the Coordinate Map may well have an affine property which returns the affine matrix corresponding to the transform. >>> coordmap.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) If you call the Coordinate Map you will apply the mapping function between the two Coordinate Systems. In this case from (i,j,k) to (x,y,z): >>> coordmap([1,2,3]) array([ 30., -36., -10.]) It can also be used to get the inverse mapping, or in this example from (x,y,z) back to (i,j,k): >>> coordmap.inverse()([30.,-36.,-10.]) array([1., 2., 3.]) We can see how this works if we just apply the affine ourselves using dot product. .. 
Note:: Notice the affine is using homogeneous coordinates so we need to add a 1 to our input. (And note how a direct call to the coordinate map does this work for you) >>> coordmap.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) >>> import numpy as np >>> np.dot(coordmap.affine, np.transpose([1,2,3,1])) array([ 30., -36., -10., 1.]) .. Note:: The answer is the same as above (except for the added 1) .. _normalize-coordmap: *************************************************** Use of the Coordinate Map for spatial normalization *************************************************** The Coordinate Map can be used to describe the transformations needed to perform spatial normalization. Suppose we have an anatomical Image from one subject *subject_img* and we want to create an Image in a standard space like Talairach space. An affine registration algorithm will produce a 4-by-4 matrix representing the affine transformation, *T*, that takes a point in the subject's coordinates *subject_world* to a point in Talairach space *talairach_world*. The subject's Image has its own Coordinate Map, *subject_cmap* and there is a Coordinate Map for Talairach space which we will call *talairach_cmap*. Having found the transformation matrix *T*, the next step in spatial normalization is usually to resample the array of *subject_img* so that it has the same shape as some atlas *atlas_img*. Note that because it is an atlas Image, *talairach_cmap=atlas_img.coordmap*. A resampling algorithm uses an interpolator which needs to know which voxel of *subject_img* corresponds to which voxel of *atlas_img*. This is therefore a function from *atlas_voxel* to *subject_voxel*. This function, paired with the information that it is a map from atlas-voxel to subject-voxel is another example of a Coordinate Map. The code to do this might look something like the following: >>> from nipy.testing import anatfile, funcfile >>> from nipy.algorithms.registration import HistogramRegistration >>> from nipy.algorithms.kernel_smooth import LinearFilter We'll make a smoothed version of the anatomical example image, and pretend it's the template >>> smoother = LinearFilter(anat_img.coordmap, anat_img.shape) >>> atlas_im = smoother.smooth(anat_img) >>> subject_im = anat_img We do an affine registration between the two. >>> reggie = HistogramRegistration(subject_im, atlas_im) >>> aff = reggie.optimize('affine').as_affine() #doctest: +ELLIPSIS Initial guess... ... Now we make a coordmap with this transformation >>> from nipy.core.api import AffineTransform >>> subject_cmap = subject_im.coordmap >>> talairach_cmap = atlas_im.coordmap >>> subject_world_to_talairach_world = AffineTransform( ... subject_cmap.function_range, ... talairach_cmap.function_range, ... aff) ... We resample the 'subject' image to the 'atlas' image >>> from nipy.algorithms.resample import resample >>> normalized_subject_im = resample(subject_im, talairach_cmap, ... subject_world_to_talairach_world, ... atlas_im.shape) >>> normalized_subject_im.shape == atlas_im.shape True >>> normalized_subject_im.coordmap == atlas_im.coordmap True >>> # Normalized image now has atlas affine. >>> assert np.all(normalized_subject_im.affine == atlas_im.affine) *********************** Mathematical definition *********************** For a more formal mathematical description of the coordinate map, see :ref:`math-coordmap`.
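As a footnote to the normalization example above: because every step in it is affine, the atlas-voxel to subject-voxel function that drives the resampling can also be assembled by hand, by multiplying the homogeneous affine matrices. The following is a minimal sketch using only ``numpy`` and the names defined in the example above; it illustrates the algebra, and is not a replacement for :func:`nipy.algorithms.resample.resample`::

    import numpy as np

    # atlas voxel -> Talairach world (the atlas coordmap affine)
    atlas_vox_to_world = talairach_cmap.affine
    # Talairach world -> subject world (inverse of the registration affine)
    world_to_subject_world = np.linalg.inv(subject_world_to_talairach_world.affine)
    # subject world -> subject voxel (inverse of the subject coordmap affine)
    subject_world_to_vox = np.linalg.inv(subject_cmap.affine)
    # atlas voxel -> subject voxel, as a single homogeneous matrix
    atlas_vox_to_subject_vox = np.dot(
        subject_world_to_vox, np.dot(world_to_subject_world, atlas_vox_to_world))

This matrix is the affine version of the function :math:`w = f^{-1} \circ g^{-1} \circ h` described in :ref:`math-coordmap`.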
nipy-0.6.1/doc/users/glm_spec.rst000066400000000000000000000416361470056100100167260ustar00rootroot00000000000000========================== Specifying a GLM in NiPy ========================== In this tutorial we will discuss NiPy's model and specification of an fMRI experiment. This involves: * an experimental model: a description of the experimental protocol (function of experimental time) * a neuronal model: a model of how a particular neuron responds to the experimental protocol (function of the experimental model) * a hemodynamic model: a model of the BOLD signal at a particular voxel (function of the neuronal model) Experimental model ================== We first begin by describing typically encountered fMRI designs. * Event-related categorical design, i.e. *Face* vs. *Object* * Block categorical design * Continuous stimuli, i.e. a rotating checkerboard * Events with amplitudes, i.e. non-categorical values * Events with random amplitudes Event-related categorical design -------------------------------- .. _face-object: This design is a canonical design in fMRI used, for instance, in an experiment designed to detect regions associated with discrimination between *Face* and *Object*. This design can be graphically represented in terms of delta-function responses that are effectively events of duration 0 and infinite height. .. plot:: users/plots/event.py In this example, the *Face* event types are presented at times [0,4,8,12,16] and *Object* event types at times [2,6,10,14,18]. More generally, given a set of event types *V*, an event type experiment can be modeled as a sum of delta functions (point masses) at pairs of times and event types: .. math:: E = \sum_{j=1}^{10} \delta_{(t_j, a_j)}. Formally, this can be thought of as a realization of a :term:`marked point process`, which says we observe 10 points in the space :math:`\mathbb{R} \times V` where *V* is the set of all event types. Alternatively, we can think of the experiment as a measure :math:`E` on :math:`\mathbb{R} \times V` .. math:: E([t_1,t_2] \times A) = \int_{t_1}^{t_2} \int_A dE(v,t) This intensity measure determines, in words, "the amount of stimulus within *A* delivered in the interval :math:`[t_1,t_2]`". In this categorical design, stimuli :math:`a_j` are delivered as point masses at the times :math:`t_j`. Practically speaking, we can read this as saying that our experiment has 10 events, occurring at times :math:`t_1,\dots,t_{10}` with event types :math:`a_1,\dots,a_{10} \in V`. Typically, as in our *Face* vs *Object* example, the events occur in groups, say odd events are labelled *a*, even ones *b*. We might rewrite this as .. math:: E = \delta_{(t_1,a)} + \delta_{(t_2,b)} + \delta_{(t_3,a)} + \dots + \delta_{(t_{10},b)} This type of experiment can be represented by two counting processes, i.e. measures on :math:`\mathbb{R}`, :math:`(E_a, E_b)` defined as .. math:: \begin{aligned} E_a(t) &= \sum_{t_j, \text{$j$ odd}} 1_{(-\infty,t_j]}(t) \\ &= E((-\infty,t], \{a\}) \\ E_b(t) &= \sum_{t_j, \text{$j$ even}} 1_{(-\infty,t_j]}(t) \\ &= E((-\infty,t], \{b\}) \\ \end{aligned} Counting processes vs. intensities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Though the experiment above can be represented in terms of the pair :math:`(E_a(t), E_b(t))`, it is more common in neuroimaging applications to work with instantaneous intensities rather than cumulative intensities. .. 
math:: \begin{aligned} e_a(t) &= \frac{\partial }{\partial t} E_a(t) \\ e_b(t) &= \frac{\partial }{\partial t} E_b(t) \end{aligned} For the time being, we will stick with cumulative intensities because it unifies the designs above. When we turn to the neuronal model below, we will return to the intensity model. .. _block-face: Block categorical design ------------------------ For block designs of the *Face* vs. *Object* type, we might also allow event durations, meaning that we show the subjects a *Face* for a period of, say, 0.5 seconds. We might represent this experiment graphically as follows, .. plot:: users/plots/block.py and the intensity measure for the experiment could be expressed in terms of .. math:: \begin{aligned} E_a(t) &= E((-\infty,t], \{a\}) &= \sum_{t_j, \text{$j$ odd}} \frac{1}{0.5} \int_{t_j}^ {\min(t_j+0.5, t)} \; ds \\ E_b(t) &= E((-\infty,t], \{b\}) &= \sum_{t_j, \text{$j$ even}} \frac{1}{0.5} \int_{t_j}^ {\min(t_j+0.5, t)} \; ds \\ \end{aligned} The normalization chosen above ensures that each event has integral 1, that is a total of 1 "stimulus unit" is presented for each 0.5 second block. This may or may not be desirable, and could easily be changed. Continuous stimuli ------------------ .. _continuous-stimuli: Some experiments do not fit well into this "event-type" paradigm but are, rather, more continuous in nature. For instance, a rotating checkerboard, for which orientation, contrast, are functions of experiment time *t*. This experiment can be represented in terms of a state vector :math:`(O(t), C(t))`. In this example we have set .. testcode:: import numpy as np t = np.linspace(0,10,1000) o = np.sin(2*np.pi*(t+1)) * np.exp(-t/10) c = np.sin(2*np.pi*(t+0.2)/4) * np.exp(-t/12) .. plot:: users/plots/sinusoidal.py The cumulative intensity measure for such an experiment might look like .. math:: E([t_1, t_2], A) = \int_{t_1}^{t_2} \left(\int_A \; dc \; do\right) \; dt. In words, this reads as :math:`E([t_1,t_2],A)` is the amount of time in the interval :math:`[t_1,t_2]` for which the state vector :math:`(O(t), C(t))` was in the region :math:`A`. .. _event-amplitudes: Events with amplitudes ---------------------- Another (event-related) experimental paradigm is one in which the event types have amplitudes, perhaps in a pain experiment with a heat stimulus, we might consider the temperature an amplitude. These amplitudes could be multi-valued. We might represent this parametric design mathematically as .. math:: E = \sum_{j=1}^{10} \delta_{(t_j, a_j)}, which is virtually identical to our description of the *Face* vs. *Object* experiment in :ref:`face-object` though the values :math:`a_j` are floats rather than labels. Graphically, this experiment might be represented as in this figure below. .. plot:: users/plots/amplitudes.py Events with random amplitudes ----------------------------- Another possible approach to specifying an experiment might be to deliver a randomly generated stimulus, say, uniformly distributed on some interval, at a set of prespecified event times. We might represent this graphically as in the following figure. .. plot:: users/plots/random_amplitudes.py Of course, the stimuli need not be randomly distributed over some interval, they could have fairly arbitrary distributions. Or, in the *Face* vs *Object* scenario, we could randomly present of one of the two types and the distribution at a particular event time :math:`t_j` would be represented by a probability :math:`P_j`. The cumulative intensity model for such an experiment might be .. 
math:: E([t_1, t_2], A) = \sum_j \int_{\max(u_j,t_1)}^{\min(v_j, t_2)} \int_A \; P_j(da) \; dt .. plot:: users/plots/random_amplitudes_times.py ================ Neuronal model ================ The neuronal model is a model of the activity as a function of *t* at a neuron *x* given the experimental model :math:`E`. It is most commonly expressed as some linear function of the experiment :math:`E`. As with the experimental model, we prefer to start off by working with the cumulative neuronal activity, a measure on :math:`\mathbb{R}`, though ultimately we will work with the intensities in :ref:`intensity`. Typically, the neuronal model with an experimental model :math:`E` has the form .. math:: N([t_1,t_2]) = \int_{t_1}^{t_2}\int_V f(v,t) \; dE(v,t) Unlike the experimental model, which can look somewhat abstract, the neuronal model can be directly modeled. For example, take the standard *Face* vs. *Object* model :ref:`face-object`, in which case :math:`V=\{a,b\}` and we can set .. math:: f(v,t) = \begin{cases} \beta_a & v = a \\ \beta_b & v = b \end{cases} Thus, the cumulative neuronal model can be expressed as .. testcode:: from sympy import Symbol, Heaviside t = Symbol('t') ta = [0,4,8,12,16] tb = [2,6,10,14,18] ba = Symbol('ba') bb = Symbol('bb') fa = sum([Heaviside(t-_t) for _t in ta]) * ba fb = sum([Heaviside(t-_t) for _t in tb]) * bb N = fa+fb Or, graphically, if we set :math:`\beta_a=1` and :math:`\beta_b=-2`, as .. plot:: users/plots/neuronal_event.py In the block design, we might have the same form for the neuronal model (i.e. the same :math:`f` above), but the different experimental model :math:`E` yields .. testcode:: from sympy import Symbol, Piecewise ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba') bb = Symbol('bb') fa = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta])*ba fb = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb])*bb N = fa+fb Or, graphically, if we set :math:`\beta_a=1` and :math:`\beta_b=-2`, as .. plot:: users/plots/neuronal_block.py The function :math:`f` above can be expressed as .. math:: f(v,t) = \beta_a 1_{\{a\}}(v) + \beta_b 1_{\{b\}}(v) = \beta_a f_a(v,t) + \beta_b f_b(v,t) Hence, our typical neuronal model can be expressed as a sum .. math:: \begin{aligned} N([t_1,t_2]) &= \sum_i \beta_i \int_{t_1}^{t_2} \int_V f_i(v,t) \; dE(v,t) \\ &= \sum_i \beta_i \tilde{N}_{f_i}([t_1,t_2]) \end{aligned} for arbitrary functions :math:`\tilde{N}_{f_i}`. Above, :math:`\tilde{N}_{f_i}` represents the stimulus contributed to :math:`N` from the function :math:`f_i`. In the *Face* vs. *Object* example :ref:`face-object`, these cumulative intensities are related to the more common neuronal model of intensities in terms of delta functions .. math:: \frac{\partial}{\partial t} \tilde{N}_{f_a}(t) = \beta_a \sum_{t_i: \text{$i$ odd}} \delta_{t_i}(t) .. testcode:: from sympy import Symbol, Heaviside ta = [0,4,8,12,16] t = Symbol('t') ba = Symbol('ba') fa = sum([Heaviside(t-_t) for _t in ta]) * ba print(fa.diff(t)) .. testoutput:: ba*(DiracDelta(t) + DiracDelta(t - 16) + DiracDelta(t - 12) + DiracDelta(t - 8) + DiracDelta(t - 4)) .. 
plot:: users/plots/hrf_delta.py Convolution =========== In our continuous example above, with a periodic orientation and contrast, we might take .. math:: \begin{aligned} f_O(t,(o,c)) &= o \\ f_C(t,(o,c)) &= c \\ \end{aligned} yielding a neuronal model .. math:: N([t_1,t_2]) = \beta_{O} O(t) + \beta_{C} C(t) We might also want to allow a delay in the neuronal model .. math:: N^{\text{delay}}([t_1,t_2]) = \beta_{O} O(t-\tau_O) + \beta_{C} C(t-\tau_C). This delay can be represented mathematically in terms of convolution (of measures) .. math:: N^{\text{delay}}([t_1,t_2]) = \left(\tilde{N}_{f_O} * \delta_{-\tau_O}\right)([t_1, t_2]) +\left(\tilde{N}_{f_C} * \delta_{-\tau_C}\right)([t_1, t_2]) Another model that uses convolution is the *Face* vs. *Object* one in which the neuronal signal is attenuated with an exponential decay at time scale :math:`\tau` .. math:: D([t_1, t_2]) = \int_{\max(t_1,0)}^{t_2} \tau e^{-\tau t} \; dt yielding .. math:: N^{\text{decay}}([t_1,t_2]) = (N * D)[t_1, t_2] ======================== Events with amplitudes ======================== We described a model above :ref:`event-amplitudes` with events that each have a continuous value :math:`a` attached to them. In terms of a neuronal model, it seems reasonable to suppose that the (cumulative) neuronal activity is related to some function, perhaps expressed as a polynomial :math:`h(a)=\sum_j \beta_j a^j` yielding a neuronal model .. math:: N([t_1, t_2]) = \sum_j \beta_j \tilde{N}_{a^j}([t_1, t_2]) Hemodynamic model ================= The hemodynamic model is a model for the BOLD signal, expressed as some function of the neuronal model. The most common hemodynamic model is just the convolution of the neuronal model with some hemodynamic response function, :math:`HRF` .. math:: \begin{aligned} HRF((-\infty,t]) &= \int_{-\infty}^t h_{can}(s) \; ds \\ H([t_1,t_2]) & = (N * HRF)[t_1,t_2] \end{aligned} The canonical one is a difference of two Gamma densities .. plot:: users/plots/hrf.py .. _intensity: Intensities =========== Hemodynamic models are, as mentioned above, most commonly expressed in terms of instantaneous intensities rather than cumulative intensities. Define .. math:: n(t) = \frac{\partial}{\partial t} N((-\infty,t]). The simple model above can then be written as .. math:: h(t) = \frac{\partial}{\partial t}(N * HRF)(t) = \int_{-\infty}^{\infty} n(t-s) h_{can}(s) \; ds. In the *Face* vs. *Object* experiment, the integrals above can be evaluated explicitly because :math:`n(t)` is a sum of delta functions .. math:: n(t) = \beta_a \sum_{t_i: \text{$i$ odd}} \delta_{t_i}(t) + \beta_b \sum_{t_i: \text{$i$ even}} \delta_{t_i}(t) In this experiment we may want to allow different hemodynamic response functions within each group, say :math:`h_a` within group :math:`a` and :math:`h_b` within group :math:`b`. This yields a hemodynamic model .. math:: h(t) = \beta_a \sum_{t_i: \text{$i$ odd}} h_a(t-t_i) + \beta_b \sum_{t_i: \text{$i$ even}} h_b(t-t_i) .. testcode:: from nipy.modalities.fmri import hrf ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = 1; bb = -2 na = ba * sum([hrf.glover(hrf.T - t) for t in ta]) nb = bb * sum([hrf.afni(hrf.T - t) for t in tb]) n = na + nb .. plot:: users/plots/hrf_different.py Applying the simple model to the events with amplitudes model and the canonical HRF yields a hemodynamic model .. math:: h(t) = \sum_{i,j} \beta_j a_i^j h_{can}(t-t_i) .. 
testcode:: import numpy as np from nipy.modalities.fmri import hrf from nipy.modalities.fmri.utils import events, Symbol a = Symbol('a') b = np.linspace(0,50,6) amp = b*([-1,1]*3) d = events(b, amplitudes=amp, g=a+0.5*a**2, f=hrf.glover) .. plot:: users/plots/event_amplitude.py Derivative information ====================== In cases where the neuronal model has more than one derivative, such as the continuous stimuli :ref:`continuous-stimuli` example, we might model the hemodynamic response using the higher derivatives as well. For example .. math:: h(t) = \beta_{O,0} \tilde{n}_{f_O}(t) + \beta_{O,1} \frac{\partial}{\partial t}\tilde{n}_{f_O}(t) + \beta_{C,0} \tilde{n}_{f_C}(t) + \beta_{C,1} \frac{\partial} {\partial t}\tilde{n}_{f_C}(t) where .. math:: \begin{aligned} \tilde{n}_f(t) &= \frac{\partial}{\partial t} \tilde{N}_f((-\infty,t]) \\ &= \frac{\partial}{\partial t} \left( \int_{-\infty}^t \int_V f(v,t) \; dE(v,t) \right) \end{aligned} ============= Design matrix ============= In a typical GLM analysis, we will compare the observed BOLD signal :math:`B(t)` at some fixed voxel :math:`x`, observed at time points :math:`(s_1, \dots, s_n)`, to a hemodynamic response model. For instance, in the *Face* vs. *Object* model, using the canonical HRF .. MAYBE SOME DATA PLOTTED HERE .. math:: B(t) = \beta_a \sum_{t_i: \text{$i$ odd}} h_{can}(t-t_i) + \beta_b \sum_{t_i: \text{$i$ even}} h_{can}(t-t_i) + \epsilon(t) where :math:`\epsilon(t)` is the correlated noise in the BOLD data. Because the BOLD is modeled as linear in :math:`(\beta_a,\beta_b)` this fits into a multiple linear regression model setting, typically written as .. math:: Y_{n \times 1} = X_{n \times p} \beta_{p \times 1} + \epsilon_{n \times 1} In order to fit the regression model, we must find the matrix :math:`X`. This is just the derivative of the model of the mean of :math:`B` with respect to the parameters to be estimated. Setting :math:`(\beta_1, \beta_2)=(\beta_a, \beta_b)` .. math:: X_{ij} = \frac{\partial}{\partial \beta_j} \left(\beta_1 \sum_{t_k: \text{$k$ odd}} h_{can}(s_i-t_k) + \beta_2 \sum_{t_k: \text{$k$ even}} h_{can}(s_i-t_k) \right) .. PUT IN PLOTS OF COLUMNS OF DESIGN HERE Drift ===== We sometimes include a natural spline model of the drift here. .. PLOT A NATURAL SPLINE .. MAYBE A COSINE BASIS This changes the design matrix by adding more columns, one for each function in our model of the drift. In general, starting from some model of the mean the design matrix is the derivative of the model of the mean, differentiated with respect to all parameters to be estimated (in some fixed order). Nonlinear example ================= The delayed continuous stimuli example above is an example of a nonlinear function of the mean that is nonlinear in some parameters, :math:`(\tau_O, \tau_C)`. .. CODE EXAMPLE OF THIS USING SYMPY =============== Formula objects =============== This process of building the model can often be simplified, using what is known in R as *formula* objects. NiPy has implemented a formula object that is similar to R's, but differs in some important respects. See :mod:`nipy.algorithms.statistics.formula`. nipy-0.6.1/doc/users/index.rst000066400000000000000000000006621470056100100162420ustar00rootroot00000000000000.. _users-guide-index: .. This is the source doc for the nipy users guide. The users guide includes the FAQ (a directory below), and glossary. ============ User Guide ============ .. only:: html :Release: |version| :Date: |today| .. 
toctree:: :maxdepth: 2 introduction installation scipy_orientation tutorial.rst ../glossary .. only:: html * :ref:`genindex` * :ref:`modindex` * :ref:`search` nipy-0.6.1/doc/users/install_data.rst000066400000000000000000000117531470056100100175730ustar00rootroot00000000000000.. _data-files: ###################### Optional data packages ###################### The source code has some very small data files to run the tests with, but it doesn't include larger example data files, or the all-important brain templates we all use. You can find packages for the optional data and template files at http://nipy.org/data-packages. If you don't have these packages, then, when you run the nipy installation, you will probably see messages pointing you to the packages you need. ********************************************* Data package installation as an administrator ********************************************* The installation procedure, for now, is very basic. For example, let us say that you need the 'nipy-templates' package at http://nipy.org/data-packages/nipy-templates-0.3.tar.gz. You simply download this archive, unpack it, and then run the standard ``python setup.py install`` on it. On a unix system this might look like:: # curl -L flag to follow redirect; can also use wget curl -OL http://nipy.org/data-packages/nipy-templates-0.3.tar.gz tar zxvf nipy-templates-0.3.tar.gz cd nipy-templates-0.3 sudo python setup.py install This is for the `nipy-templates` package; there is also a `nipy-data` package, for which the equivalent would be:: curl -OL http://nipy.org/data-packages/nipy-data-0.3.tar.gz On windows, download the file, extract the archive to a folder using the GUI, and then, using the windows shell or similar:: cd c:\path\to\extracted\files python setup.py install ******************************************* Non-administrator data package installation ******************************************* The simple ugly manual way ========================== These are instructions for using the command line in Unix. You can do similar things from Windows powershell. * Locate your nipy user directory from the output of this:: python -c 'import nibabel.data; print(nibabel.data.get_nipy_user_dir())' Call that directory ``<nipy-user-dir>``. Let's imagine that, for you, this is ``~/.nipy``. * Make a subdirectory ``nipy`` in your ``<nipy-user-dir>`` directory. In Unix you could use:: mkdir -p ~/.nipy/nipy where the ``-p`` flag tells Unix to make any necessary parent directories. * Go to http://nipy.org/data-packages * Download the latest *nipy-templates* and *nipy-data* packages, to some directory. You can do this via the GUI, or on the command line (in Unix):: cd ~/Downloads curl -OL http://nipy.org/data-packages/nipy-templates-0.3.tar.gz curl -OL http://nipy.org/data-packages/nipy-data-0.3.tar.gz * Unpack both of these:: tar zxvf nipy-data-0.3.tar.gz tar zxvf nipy-templates-0.3.tar.gz * After you have unpacked the templates, you will have a directory called something like ``nipy-templates-0.3``. In that directory you should see a subdirectory called ``templates``. Copy / move / link the ``templates`` subdirectory into ``<nipy-user-dir>/nipy``, so you now have a directory ``<nipy-user-dir>/nipy/templates``. From unpacking the data, you should also have a directory like ``nipy-data-0.3`` with a subdirectory ``data``. Copy / move / link that ``data`` directory into ``<nipy-user-dir>/nipy`` as well. For example:: cp -r nipy-data-0.3/data ~/.nipy/nipy cp -r nipy-templates-0.3/templates ~/.nipy/nipy * Check whether that worked.
Run the following command from the shell:: python -c 'import nipy.utils; print(nipy.utils.example_data, nipy.utils.templates)' It should show something like:: (<nibabel.data.Datasource object at ...>, <nibabel.data.Datasource object at ...>) If it shows ``Bomber`` objects instead, something is wrong. Go back and check that you have the nipy home directory right, and that you have directories ``<nipy-user-dir>/nipy/data`` and ``<nipy-user-dir>/nipy/templates``, and that each of these two directories has a file ``config.ini`` in it. The more general way ==================== The commands for the system install above assume you are installing into the default system directories. If you want to install into a custom directory, then (in python, or ipython, or a text editor) look at the help for ``nibabel.data.get_data_path()``. There are instructions there for pointing your nipy installation to the installed data. On unix ------- For example, say you installed with:: cd nipy-templates-0.3 python setup.py install --prefix=/home/my-user/some-dir Then you may want to make a file ``~/.nipy/config.ini`` with the following contents:: [DATA] path=/home/my-user/some-dir/share/nipy On windows ---------- Say you installed with (windows shell):: cd nipy-templates-0.3 python setup.py install --prefix=c:\some\path Then first, find out your home directory:: python -c "import os; print(os.path.expanduser('~'))" Let's say that was ``c:\Documents and Settings\My User``. Then, make a new file called ``c:\Documents and Settings\My User\_nipy\config.ini`` with contents:: [DATA] path=c:\some\path\share\nipy nipy-0.6.1/doc/users/installation.rst000066400000000000000000000161651470056100100176340ustar00rootroot00000000000000.. _installation: #################### Download and Install #################### ******* Summary ******* * if you don't have it, install Python using the instructions below; * if you don't have it, install Pip_ using the instructions below; * if you don't have them, install NumPy_ >= 1.14 and Scipy >= 1.0 using the instructions below; * install Nipy with something like: .. code-block:: bash pip3 install --user nipy .. note:: These instructions are for Python 3. If you are using Python 2.7, use ``python2`` instead of ``python3`` and ``pip2`` instead of ``pip3``, for the commands below. ******* Details ******* Install Python, Pip, Numpy and Scipy ==================================== First install Python 3, then install the Python package installer Pip. Install Python 3 on Linux ------------------------- We recommend: * ``sudo apt-get install -y python3 python3-tk`` (Debian, Ubuntu); * ``sudo dnf install -y python3 python3-tkinter`` (Fedora). These are the bare minimum installs. You will almost certainly want to install the development tools for Python to allow you to compile other Python packages: * ``sudo apt-get install -y python3-dev`` (Debian, Ubuntu); * ``sudo dnf install -y python3-devel`` (Fedora). Now :ref:`install-pip`. Install Python 3 on macOS ------------------------- We recommend you install Python 3.5 or later using Homebrew (http://brew.sh/): .. code-block:: bash brew install python3 Homebrew is an excellent all-round package manager for macOS that you can use to install many other free / open-source packages. Now :ref:`install-pip`. .. _install-pip: Install Pip on Linux or macOS ----------------------------- Pip can install packages into your main system directories (a *system* install), or into your own user directories (a *user* install). We strongly recommend *user* installs.
To get ready for user installs, put the user local install ``bin`` directory on your user's executable program ``PATH``. First find the location of the user ``bin`` directory with: .. code-block:: bash python3 -c 'import site; print(site.USER_BASE + "/bin")' This will give you a result like ``/home/your_username/.local/bin`` (Linux) or ``/Users/your_username/Library/Python/3.5/bin`` (macOS). Use your favorite text editor to open the ``~/.bashrc`` file (Linux) or ``.bash_profile`` (macOS) in your home directory. Add these lines to the end of the file: .. code-block:: bash # Put the path to the local bin directory into a variable py3_local_bin=$(python3 -c 'import site; print(site.USER_BASE + "/bin")') # Put the directory at the front of the system PATH export PATH="$py3_local_bin:$PATH" Save the file, and restart your terminal to load the configuration from your ``~/.bashrc`` (Linux) or ``~/.bash_profile`` (macOS) file. Confirm that you have the user install directory in your PATH, with: .. code-block:: bash echo $PATH Now install the Python package installer Pip into your user directories (see: `install pip with get-pip.py`_): .. code-block:: bash # Download the get-pip.py installer curl -LO https://bootstrap.pypa.io/get-pip.py # Execute the installer for Python 3 and a user install python3 get-pip.py --user Check you have the right version of the ``pip3`` command with: .. code-block:: bash which pip3 This should give you something like ``/home/your_username/.local/bin/pip3`` (Linux) or ``/Users/your_username/Library/Python/3.5/bin/pip3`` (macOS). Now :ref:`install-numpy-scipy`. .. _install-numpy-scipy: Install Python 3, Pip, NumPy and Scipy on Windows ------------------------------------------------- It's worth saying here that very few scientific Python developers use Windows, so if you're thinking of making the switch to Linux or macOS, now you have another reason to do that. Option 1: Anaconda ^^^^^^^^^^^^^^^^^^ If you are installing on Windows, you might want to use the Python 3 version of `Anaconda`_. This is a large installer that will install many scientific Python packages, including NumPy and Scipy, as well as Python itself, and Pip, the package manager. The machinery for the Anaconda bundle is not completely open-source, and is owned by a company, Continuum Analytics. If you would prefer to avoid using the Anaconda installer, you can also use the Python standard Pip installer. Option 2: Standard install ^^^^^^^^^^^^^^^^^^^^^^^^^^ If you don't have Python / Pip, we recommend the instructions `here `_ to install them. You can also install Python / Pip via the Python 3 installer from the https://python.org website. If you already have an old Python installation, you don't have Pip, and you don't want to upgrade, you will need to download and install Pip following the instructions at `install pip with get-pip.py`_. Now open a Cmd or Powershell terminal and run: .. code-block:: bash pip3 install --user numpy scipy Install Nipy ============ Now you have Python and Pip: .. code-block:: bash pip3 install --user nipy On Windows, macOS, and nearly all Linux versions on Intel, this will install a binary (Wheel_) package of NiPy. *************************** Other packages we recommend *************************** * IPython_: Interactive Python environment; * Matplotlib_: Python plotting library.
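Whichever route you took, you can run a quick check that the Nipy install worked. This is a minimal sanity-check sketch: it assumes only the standard ``nipy.__version__`` attribute and the ``load_image`` / ``nipy.testing.anatfile`` names used elsewhere in this guide; the exact version string you see will depend on your install:

.. code-block:: bash

    # Print the installed Nipy version
    python3 -c "import nipy; print(nipy.__version__)"
    # Load the small test image shipped with Nipy and show its shape
    python3 -c "from nipy import load_image; from nipy.testing import anatfile; print(load_image(anatfile).shape)"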
******************************** Building from latest source code ******************************** Dependencies for build ====================== * A C compiler: Nipy does contain a few C extensions for optimized routines. Therefore, you must have a compiler to build from source. Use XCode_ for your C compiler on macOS. On Windows, you will need the Microsoft Visual C++ version corresponding to your Python version - see `using MSVC with Python `_. On Linux you should have the packages you need after you install the ``python3-dev`` (Debian / Ubuntu) or ``python3-devel`` (Fedora) packages using the instructions above; * Cython_ 0.12.1 or later: Cython is a language that is a fusion of Python and C. It allows us to write fast code using Python and C syntax, so that it is easier to read and maintain than C code with the same functionality; * Git_ version control software: follow the instructions on the `main git website `_ to install Git on Linux, macOS or Windows. Procedure ========= Please look through the :ref:`development quickstart ` documentation. There you will find information on building NIPY, the required software packages and our developer guidelines. Then: .. code-block:: bash # install Cython pip3 install --user cython .. code-block:: bash # Clone the project repository git clone https://github.com/nipy/nipy to get the latest development version, and: .. code-block:: bash # Build the latest version in-place cd nipy pip3 install --user --editable . to install the code in the development tree into your Python path. **************************** Installing useful data files **************************** See :ref:`data-files` for some instructions on installing data packages. .. include:: ../links_names.txt nipy-0.6.1/doc/users/introduction.rst000066400000000000000000000007031470056100100176500ustar00rootroot00000000000000.. _introduction: ============== Introduction ============== As you can see, we do not yet have much of a user guide for NIPY. We are spending all our effort in developing the building blocks of the code, and we have not yet returned to a guide to how to use it. We are starting to write general :ref:`tutorial-index` that include introductions to how to use NIPY code to run analyses. .. toctree:: :maxdepth: 2 ../mission ../history nipy-0.6.1/doc/users/math_coordmap.rst000066400000000000000000000434631470056100100177550ustar00rootroot00000000000000.. _math-coordmap: ********************************************** Mathematical formulation of the Coordinate Map ********************************************** The *CoordinateMap* can take a little getting used to. For some users, a mathematical description, free of any python syntax, code design and snippets, may be helpful. After following through this description, the code design and usage may be clearer. We return to the normalization example in :ref:`normalize-coordmap` and try to write it out mathematically. Conceptually, to do normalization, we need to be able to answer each of these three questions: 1. *Voxel-to-world (subject)* Given the subject's anatomical image read off the scanner: which physical location, expressed in :math:`(x_s,y_s,z_s)` coordinates (:math:`s` for subject), corresponds to the voxel of data :math:`(i_s,j_s,k_s)`? This question is answered by *subject_im.coordmap*. The actual function that computes this, i.e. that takes 3 floats and returns 3 floats, is *subject_im.coordmap.mapping*. 2. 
*World-to-world (subject to Talairach)* Given a location :math:`(x_s,y_s,z_s)` in an anatomical image of the subject, where does it lie in the Talairach coordinates :math:`(x_a,y_a, z_a)`? This is answered by the matrix *T* and knowing that *T* maps a point in the subject's world to Talairach world. Hence, this question is answered by *subject_world_to_talairach_world* above. 3. *Voxel-to-world (Talairach)* Since we want to produce a resampled Image that has the same shape and coordinate information as *atlas_im*, we need to know what location in Talairach space, :math:`(x_a,y_a,z_a)` (:math:`a` for atlas) corresponds to the voxel :math:`(i_a,j_a,k_a)`. This question is answered by *talairach_cmap*. Each of these three questions is answered, in code, by a class called *CoordinateMap*. Mathematically, let's define a *mapping* as a tuple :math:`(D,R,f)` where :math:`D` is the *domain*, :math:`R` is the *range* and :math:`f:D\rightarrow R` is a function. It may seem redundant to pair :math:`(D,R)` with :math:`f` because a function must surely know its domain and hence, implicitly, its range. However, we will see that when it comes time to implement the notion of *mapping*, the tuple we do use to construct *CoordinateMap* is almost, but not quite :math:`(D,R,f)` and, in the tuple we use, :math:`D` and :math:`R` are not redundant. Since these mappings are going to be used and called with modules like :mod:`numpy`, we should restrict our definition a little bit. We assume the following: 1. :math:`D` is isomorphic to one of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n` for some :math:`n`. This isomorphism is determined by a basis :math:`[u_1,\dots,u_n]` of :math:`D` which maps :math:`u_i` to :math:`e_i` the canonical i-th coordinate vector of whichever of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n`. This isomorphism is denoted by :math:`I_D`. Strictly speaking, if :math:`D` is isomorphic to :math:`\mathbb{Z}^n` then the term basis is possibly misleading because :math:`D` is not a vector space, but it is a group, so we might call the basis a set of generators instead. In any case, the implication is that whatever properties the appropriate one of :math:`\mathbb{Z},\mathbb{R},\mathbb{C}` has, :math:`D` (and :math:`R`) has as well. 2. :math:`R` is similarly isomorphic to one of :math:`\mathbb{Z}^m, \mathbb{R}^m, \mathbb{C}^m` for some :math:`m` with isomorphism :math:`I_R` and basis :math:`[v_1,\dots,v_m]`. Above, and throughout, the brackets "[","]" represent things interpretable as python lists, i.e. sequences. These isomorphisms are just fancy ways of saying that the point :math:`x=3,y=4,z=5` is represented by the 3 real numbers (3,4,5). In this case the basis is :math:`[x,y,z]` and for any :math:`a,b,c \in \mathbb{R}` .. math:: I_D(a\cdot x + b \cdot y + c \cdot z) = a \cdot e_1 + b \cdot e_2 + c \cdot e_3 We might call the pairs :math:`([u_1,...,u_n], I_D), ([v_1,...,v_m], I_R)` *coordinate systems*. Actually, the bases in effect determine the maps :math:`I_D,I_R` as long as we know which of :math:`\mathbb{Z},\mathbb{R},\mathbb{C}` we are talking about so in effect, :math:`([u_1,...,u_n], \mathbb{R})` could be called a *coordinate system*. This is how it is implemented in the code with :math:`[u_1, \dots, u_n]` being replaced by a list of strings naming the basis vectors and :math:`\mathbb{R}` replaced by a builtin :func:`numpy.dtype`. In our normalization example, we therefore have 3 mappings: 1. 
*Voxel-to-world (subject)* In standard notation for functions, we can write .. math:: (i_s,j_s,k_s) \overset{f}{\mapsto} (x_s,y_s,z_s). The domain is :math:`D=[i_s,j_s,k_s]`, the range is :math:`R=[x_s,y_s,z_s]` and the function is :math:`f:D \rightarrow R`. 2. *World-to-world (subject to Talairach)* Again, we can write .. math:: (x_s,y_s,z_s) \overset{g}{\mapsto} (x_a,y_a,z_a) The domain is :math:`D=[x_s,y_s,z_s]`, the range is :math:`R=[x_a,y_a,z_a]` and the function is :math:`g:D \rightarrow R`. 3. *Voxel-to-world (Talairach)* Again, we can write .. math:: (i_a,j_a,k_a) \overset{h}{\mapsto} (x_a,y_a, z_a). The domain is :math:`D=[i_a,j_a,k_a]`, the range is :math:`R=[x_a,y_a,z_a]` and the function is :math:`h:D \rightarrow R`. Note that each of the functions :math:`f,g,h` can be, when we know the necessary isomorphisms, thought of as functions from :math:`\mathbb{R}^3` to itself. In fact, that is what we are doing when we write .. math:: (i_a,j_a,k_a) \overset{h}{\mapsto} (x_a,y_a, z_a) as a function that takes 3 numbers and gives 3 numbers. Formally, these functions that take 3 numbers and return 3 numbers can be written as :math:`\tilde{f}=I_R \circ f \circ I_D^{-1}`. When this is implemented in code, it is actually the functions :math:`\tilde{f}, \tilde{g}, \tilde{h}` we specify, rather than :math:`f,g,h`. The functions :math:`\tilde{f}, \tilde{g}, \tilde{h}` have domains and ranges that are just :math:`\mathbb{R}^3`. We therefore call a *coordinate map* a tuple .. math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), I_R \circ f \circ I_D^{-1}) where :math:`u_D, u_R` are bases for :math:`D,R`, respectively. It is this object that is implemented in code. There is a simple relationship between *mappings* and *coordinate maps* .. math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), \tilde{f}) \leftrightarrow (D, R, f=I_R^{-1} \circ \tilde{f} \circ I_D) Because :math:`\tilde{f}, \tilde{g}, \tilde{h}` are just functions from :math:`\mathbb{R}^3` to itself, they can all be composed with one another. But, from our description of the functions above, we know that only certain compositions make sense and others do not, such as :math:`g \circ h`. Compositions that do make sense include 1. :math:`h^{-1} \circ g`: which voxel :math:`(i_a,j_a, k_a)` corresponds to the point :math:`(x_s,y_s,z_s)`? 2. :math:`g \circ f`: which point :math:`(x_a,y_a,z_a)` corresponds to the voxel :math:`(i_s,j_s,k_s)`? The composition that is used in the normalization example is :math:`w = f^{-1} \circ g^{-1} \circ h` which is a function .. math:: (i_a, j_a, k_a) \overset{w}{\mapsto} (i_s, j_s, k_s) This function, or more correctly its representation :math:`\tilde{w}` that takes 3 floats to 3 floats, is passed directly to :func:`scipy.ndimage.map_coordinates`. Manipulating mappings, coordinate systems and coordinate maps ============================================================= In order to solve our normalization problem, we will definitely need to compose functions. We may want to carry out other formal operations as well. Before describing operations on mappings, we describe the operations you might want to consider on coordinate systems. Coordinate systems ------------------ 1. *Reorder*: This is just a reordering of the basis, i.e. :math:`([u_1,u_2,u_3], \mathbb{R}) \mapsto ([u_2,u_3,u_1], \mathbb{R})` 2. *Product*: Topological product of the coordinate systems (with a small twist). Given two coordinate systems :math:`([u_1,u_2,u_3], \mathbb{R}), ([v_1, v_2], \mathbb{Z})` the product is represented as .. 
Coordinate systems ------------------ 1. *Reorder*: This is just a reordering of the basis, i.e. :math:`([u_1,u_2,u_3], \mathbb{R}) \mapsto ([u_2,u_3,u_1], \mathbb{R})`. 2. *Product*: Topological product of the coordinate systems (with a small twist). Given two coordinate systems :math:`([u_1,u_2,u_3], \mathbb{R}), ([v_1, v_2], \mathbb{Z})` the product is represented as .. math:: ([u_1,u_2,u_3], \mathbb{R}) \times ([v_1, v_2], \mathbb{Z}) \mapsto ([u_1,u_2,u_3,v_1,v_2], \mathbb{R}). Note that the resulting coordinate system is real valued whereas one of the input coordinate systems was integer valued. We can always embed :math:`\mathbb{Z}` into :math:`\mathbb{R}`. If one of them is complex valued, the resulting coordinate system is complex valued. In the code, this is handled by attempting to find a safe builtin numpy.dtype for the two (or more) given coordinate systems. Mappings -------- 1. *Inverse*: Given a mapping :math:`M=(D,R,f)`, if the function :math:`f` is invertible, this is just the obvious :math:`M^{-1}=(R, D, f^{-1})`. 2. *Composition*: Given two mappings, :math:`M_f=(D_f, R_f, f)` and :math:`M_g=(D_g, R_g, g)`, if :math:`D_f == R_g` then the composition is well defined and the composition of the mappings :math:`[M_f,M_g]` is just :math:`(D_g, R_f, f \circ g)`. 3. *Reorder domain / range*: Given a mapping :math:`M=(D=[i,j,k], R=[x,y,z], f)` you might want to specify that we've changed the domain by changing the ordering of its basis to :math:`[k,i,j]`. Call the new domain :math:`D'`. This is represented by the composition of the mappings :math:`[M, O]` where :math:`O=(D', D, I_D^{-1} \circ f_O \circ I_{D'})` and, for :math:`a,b,c \in \mathbb{R}`: .. math:: f_O(a,b,c) = (b,c,a). 4. *Linearize*: Possibly less used. Since we know that :math:`f` must map one of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n` to one of :math:`\mathbb{Z}^m, \mathbb{R}^m, \mathbb{C}^m`, we might be able to differentiate it at a point :math:`p \in D`, yielding its 1st order Taylor approximation .. math:: f_p(d) = f(p) + Df_p(d-p) which is an affine function, thus creating an affine mapping :math:`(D, R, f_p)`. Affine functions are discussed in more detail below. 5. *Product*: Given two mappings :math:`M_1=(D_1,R_1,f_1), M_2=(D_2, R_2, f_2)` we define their product as the mapping :math:`(D_1 + D_2, R_1 + R_2, f_1 \otimes f_2)` where .. math:: (f_1 \otimes f_2)(d_1, d_2) = (f_1(d_1), f_2(d_2)). Above, we have taken the liberty of expressing the product of the coordinate systems, say, :math:`D_1=([u_1, \dots, u_n], \mathbb{R}), D_2=([v_1, \dots, v_m], \mathbb{C})`, as a python addition of lists. The name *product* for this operation is not necessarily canonical. If the two coordinate systems are vector spaces and the function is linear, then we might call this map the *direct sum*, because its domain and range are direct sums of vector spaces. The term *product* here refers to the fact that the domain and range are true topological products. A short code sketch of the *product* operation follows below.
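A sketch of *product* on two toy one-dimensional mappings (the names are arbitrary; *product* is the module-level function mentioned in the Implementation section below):

>>> from nipy.core.api import CoordinateMap, CoordinateSystem
>>> from nipy.core.reference.coordinate_map import product
>>> M1 = CoordinateMap(CoordinateSystem('i'), CoordinateSystem('x'), lambda d: 3 * d)
>>> M2 = CoordinateMap(CoordinateSystem('j'), CoordinateSystem('y'), lambda d: d + 1)
>>> M = product(M1, M2)
>>> M.function_domain.coord_names, M.function_range.coord_names
(('i', 'j'), ('x', 'y'))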
Affine mappings --------------- An *affine mapping* is one in which the function :math:`f:D \rightarrow R` is an affine function. That is, it can be written as :math:`f(d) = Ad + b` for :math:`d \in D`, for some :math:`n_R \times n_D` matrix :math:`A` with entries that are in one of :math:`\mathbb{Z}, \mathbb{R}, \mathbb{C}`. Strictly speaking, this is a slight abuse of notation because :math:`d` is a point in :math:`D`, not a tuple of real (or integer or complex) numbers. The matrix :math:`A` represents a linear transformation from :math:`D` to :math:`R` in a particular choice of bases for :math:`D` and :math:`R`. Let us revisit some of the operations on a mapping as applied to *affine mappings*, which we write as a tuple :math:`M=(D, R, T)` with :math:`T` the representation of the pair :math:`(A,b)` in homogeneous coordinates. 1. *Inverse*: If :math:`T` is invertible, this is just the tuple :math:`M^{-1}=(R, D, T^{-1})`. 2. *Composition*: The composition of two affine mappings :math:`[(D_2, R_2, T_2), (D_1,R_1,T_1)]` is defined whenever :math:`R_1==D_2` and is the tuple :math:`(D_1, R_2, T_2 T_1)`. 3. *Reorder domain*: A reordering of the domain of an affine mapping :math:`M=(D, R, T)` can be represented by a :math:`(n_D+1) \times (n_D+1)` permutation matrix :math:`P` (in which the last coordinate is unchanged -- remember we are in homogeneous coordinates). Hence a reordering of :math:`D` to :math:`D'` can be represented as :math:`(D', R, TP)`. Alternatively, it is the composition of the affine mappings :math:`[M,(D', D, P)]`. 4. *Reorder range*: A reordering of the range can be represented by a :math:`(n_R+1) \times (n_R+1)` permutation matrix :math:`\tilde{P}`. Hence a reordering of :math:`R` to :math:`R'` can be represented as :math:`(D, R', \tilde{P}T)`. Alternatively, it is the composition of the affine mappings :math:`[(R, R', \tilde{P}), M]`. 5. *Linearize*: Because the mapping :math:`M=(D,R,T)` is already affine, this leaves it unchanged. 6. *Product*: Given two affine mappings :math:`M_1=(D_1,R_1,T_1)` and :math:`M_2=(D_2,R_2,T_2)` the product is the tuple .. math:: \left(D_1+D_2,R_1+R_2, \begin{pmatrix} T_1 & 0 \\ 0 & T_2 \end{pmatrix} \right). 3-dimensional affine mappings ----------------------------- For an Image, by far the most common mappings associated to it are affine, and these are usually maps from a real 3-dimensional domain to a real 3-dimensional range. These can be represented by the ubiquitous :math:`4 \times 4` matrix (the representation of the affine mapping in homogeneous coordinates), along with choices for the axes, i.e. :math:`[i,j,k]`, and the spatial coordinates, i.e. :math:`[x,y,z]`. We will revisit some of the operations on mappings as applied specifically to 3-dimensional affine mappings, which we write as a tuple :math:`A=(D, R, T)` where :math:`T` is an invertible :math:`4 \times 4` transformation matrix with real entries; a code sketch follows the list below. 1. *Inverse*: Because we have assumed that :math:`T` is invertible, this is just the tuple :math:`(([x,y,z], \mathbb{R}), ([i,j,k], \mathbb{R}), T^{-1})`. 2. *Composition*: Given two 3-dimensional affine mappings :math:`M_1=(D_1,R_1, T_1), M_2=(D_2,R_2,T_2)`, the composition of :math:`[M_2,M_1]` yields another 3-dimensional affine mapping whenever :math:`R_1 == D_2`. That is, it yields :math:`(D_1, R_2, T_2T_1)`. 3. *Reorder domain*: A reordering of the domain can be represented by a :math:`4 \times 4` permutation matrix :math:`P` (with its last coordinate not changing). Hence the reordering of :math:`D=([i,j,k], \mathbb{R})` to :math:`([k,i,j], \mathbb{R})` can be represented as :math:`(([k,i,j], \mathbb{R}), R, TP)`. 4. *Reorder range*: A reordering of the range can also be represented by a :math:`4 \times 4` permutation matrix :math:`\tilde{P}` (with its last coordinate not changing). Hence the reordering of :math:`R=([x,y,z], \mathbb{R})` to :math:`([z,x,y], \mathbb{R})` can be represented as :math:`(D, ([z,x,y], \mathbb{R}), \tilde{P}T)`. 5. *Linearize*: Just as for a general affine mapping, this does nothing. 6. *Product*: Because we are dealing with only 3-dimensional mappings here, the product cannot be used, because it would give a mapping between spaces of dimension higher than 3.
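A sketch of these operations using nipy's *AffineTransform* class (the diagonal matrix is an arbitrary example):

>>> import numpy as np
>>> from nipy.core.api import AffineTransform
>>> A = AffineTransform.from_params('ijk', 'xyz', np.diag([2., 3., 4., 1.]))
>>> A.inverse().function_domain.coord_names   # inverse maps xyz -> ijk
('x', 'y', 'z')
>>> A.reordered_domain('kij').function_domain.coord_names
('k', 'i', 'j')
>>> A.reordered_range('zxy').function_range.coord_names
('z', 'x', 'y')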
Coordinate maps --------------- As noted above, *coordinate maps* are equivalent to *mappings* through the bijection .. math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), \tilde{f}) \leftrightarrow (D, R, I_R^{-1} \circ \tilde{f} \circ I_D) So, any manipulations on *mappings*, *affine mappings* or *3-dimensional affine mappings* can be carried out on *coordinate maps*, *affine coordinate maps* or *3-dimensional affine coordinate maps*. Implementation ============== Going from this mathematical description to code is fairly straightforward. 1. A *coordinate system* is implemented by the class *CoordinateSystem* in the module :mod:`nipy.core.reference.coordinate_system`. Its constructor takes a list of names, naming the basis vectors of the *coordinate system*, and an optional builtin numpy scalar dtype such as np.float32. It has no interesting methods of any kind. But there is a module level function *product* which implements the notion of the product of *coordinate systems*. 2. A *coordinate map* is implemented by the class *CoordinateMap* in the module :mod:`nipy.core.reference.coordinate_map`. Its constructor takes two coordinate systems and a callable, with signature *(function_domain, function_range, function)*, along with an optional argument *inverse_function* specifying the inverse of *function*. This matches the :math:`(D, R, f)` order of this document. As noted above, the tuple :math:`(D, R, f)` has some redundancy because the function :math:`f` must know its domain, and, implicitly, its range. In :mod:`numpy`, it is impractical to really pass :math:`f` to the constructor, because :math:`f` would expect something of *dtype* :math:`D` and should return something of *dtype* :math:`R`. Therefore, *function* is actually a callable that represents the function :math:`\tilde{f} = I_R \circ f \circ I_D^{-1}`. Of course, the function :math:`f` can be recovered as :math:`f = I_R^{-1} \circ \tilde{f} \circ I_D`. In code, :math:`f` is roughly equivalent to: >>> from nipy.core.api import CoordinateMap, CoordinateSystem >>> in_cs = CoordinateSystem('ijk', 'voxels') >>> out_cs = CoordinateSystem('xyz', 'mm') >>> func = lambda x: x + 1 >>> coordmap = CoordinateMap(in_cs, out_cs, func) >>> domain = coordmap.function_domain >>> rng = coordmap.function_range >>> f_tilde = coordmap.function >>> in_dtype = domain.coord_dtype >>> out_dtype = rng.dtype >>> def f(d): ... return f_tilde(d.view(in_dtype)).view(out_dtype) The class *CoordinateMap* can return its *inverse*, there are module level functions called *product*, *compose* and *linearize*, and it has the methods *reordered_domain* and *reordered_range*. For more detail on the ideas behind the coordmap design, see :ref:`coordmap-discussion`.
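The affine case is handled by a third class in the same module, *AffineTransform*, which stores the homogeneous matrix in its *affine* attribute. A minimal sketch, also showing the module-level *product* on affines (the one-dimensional systems are purely illustrative):

>>> import numpy as np
>>> from nipy.core.api import AffineTransform
>>> from nipy.core.reference.coordinate_map import product
>>> A1 = AffineTransform.from_params('i', 'x', np.array([[2., 0.], [0., 1.]]))
>>> A2 = AffineTransform.from_params('j', 'y', np.array([[3., 1.], [0., 1.]]))
>>> P = product(A1, A2)
>>> np.allclose(P.affine, [[2, 0, 0], [0, 3, 1], [0, 0, 1]])
True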
""" import numpy as np import pylab pylab.scatter([0,4,8,12,16], [0,1.1,2.3,0.9,0.3], c='r', marker='o') a = pylab.gca() a.set_yticks([0,2]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.6.1/doc/users/plots/block.py000066400000000000000000000012511470056100100172010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type with Faces presented at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18]. There are two values for Y: one for 'Face' and one for 'Object' """ import numpy as np import pylab for t in [0,4,8,12,16]: pylab.plot([t,t+0.5], [1,1], c='r', label='Face', linewidth=3) for t in [2,6,10,14,18]: pylab.plot([t,t+0.5], [0,0], c='b', label='Object', linewidth=3) a = pylab.gca() a.set_ylim([-0.1,1.1]) a.set_yticks([0,1]) a.set_yticklabels(['Object', 'Face']) a.set_xlim([-0.5,10]) a.set_xlabel('Time') nipy-0.6.1/doc/users/plots/event.py000066400000000000000000000012101470056100100172230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type with Faces presented at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18]. There are two values for Y: one for 'Face' and one for 'Object' """ import numpy as np import pylab pylab.scatter([0,4,8,12,16], [1,1,1,1,1], c='r', marker='o', label='Face') pylab.scatter([2,6,10,14,18], [0,0,0,0,0], c='b', marker='o', label='Object') a = pylab.gca() a.set_ylim([-0.1,1.1]) a.set_yticks([0,1]) a.set_yticklabels(['Object', 'Face']) a.set_xlim([-0.5,10]) a.set_xlabel('Time') nipy-0.6.1/doc/users/plots/event_amplitude.py000066400000000000000000000016451470056100100213030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import pylab from nipy.modalities.fmri.hrf import glover from nipy.modalities.fmri.utils import Symbol, events, lambdify_t # Symbol for amplitude a = Symbol('a') # Some event onsets regularly spaced onsets = np.linspace(0,50,6) # Make amplitudes from onset times (greater as function of time) amplitudes = onsets[:] # Flip even numbered amplitudes amplitudes = amplitudes * ([-1, 1] * 3) # Make event functions evs = events(onsets, amplitudes=amplitudes, g=a + 0.5 * a**2, f=glover) # Real valued function for symbolic events real_evs = lambdify_t(evs) # Time points at which to sample t_samples = np.linspace(0,60,601) pylab.plot(t_samples, real_evs(t_samples), c='r') for onset, amplitude in zip(onsets, amplitudes): pylab.plot([onset, onset],[0, 25 * amplitude], c='b') pylab.show() nipy-0.6.1/doc/users/plots/hrf.py000066400000000000000000000007571470056100100167000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Plot of the canonical Glover HRF """ import matplotlib.pyplot as plt import numpy as np from nipy.modalities.fmri import hrf, utils # hrf.glover is a symbolic function; get a function of time to work on arrays hrf_func = utils.lambdify_t(hrf.glover(utils.T)) t = np.linspace(0,25,200) plt.plot(t, hrf_func(t)) a=plt.gca() a.set_xlabel(r'$t$') a.set_ylabel(r'$h_{can}(t)$') nipy-0.6.1/doc/users/plots/hrf_delta.py000066400000000000000000000007751470056100100200510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; 
indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This plot demonstrates a neuronal model that is a sum of delta functions times coefficient values """ import matplotlib.pyplot as plt # Coefficients for a and b ba = 1 bb = -2 # Times for a and b ta = [0,4,8,12,16] tb = [2,6,10,14,18] for t in ta: plt.plot([t,t],[0,ba],c='r') for t in tb: plt.plot([t,t],[0,bb],c='b') a = plt.gca() a.set_xlabel(r'$t$') a.set_ylabel(r'$n(t)$') nipy-0.6.1/doc/users/plots/hrf_different.py000066400000000000000000000016401470056100100207160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This example uses a different HRF for different event types """ import matplotlib.pyplot as plt import numpy as np from nipy.modalities.fmri import hrf from nipy.modalities.fmri.utils import T, lambdify_t # HRFs as functions of (symbolic) time glover = hrf.glover(T) afni = hrf.afni(T) ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = 1; bb = -2 na = ba * sum(glover.subs(T, T - t) for t in ta) nb = bb * sum(afni.subs(T, T - t) for t in tb) nav = lambdify_t(na) nbv = lambdify_t(nb) t = np.linspace(0,30,200) plt.plot(t, nav(t), c='r', label='Face') plt.plot(t, nbv(t), c='b', label='Object') plt.plot(t, nbv(t)+nav(t), c='g', label='Combined') for t in ta: plt.plot([t,t],[0,ba*0.5],c='r') for t in tb: plt.plot([t,t],[0,bb*0.5],c='b') plt.plot([0,30], [0,0],c='#000000') plt.legend() plt.show() nipy-0.6.1/doc/users/plots/neuronal_block.py000066400000000000000000000017021470056100100211050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent the neuronal block model with Faces at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18] each presented for 0.5 seconds and a coefficient of +1 for Faces, -2 for Objects. """ import numpy as np import pylab from sympy import Piecewise, Symbol, lambdify ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba'); bb = Symbol('bb'); t = Symbol('t') fa = sum(Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta)*ba fb = sum(Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb)*bb N = fa+fb Nn = N.subs(ba,1) Nn = Nn.subs(bb,-2) NNl = lambdify(t, Nn) tt = np.linspace(-1,21,121) pylab.plot(tt, [NNl(float(_t)) for _t in tt]) a = pylab.gca() a.set_ylim([-5.5,1.5]) a.set_ylabel('Neuronal (cumulative)') a.set_xlabel('Time') pylab.show() nipy-0.6.1/doc/users/plots/neuronal_event.py000066400000000000000000000016661470056100100211450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent the neuronal event-related model and a coefficient of +1 for Faces, -2 for Objects. """ import matplotlib.pyplot as plt import numpy as np from sympy import Heaviside, Symbol, lambdify ta = [0, 4, 8, 12, 16] tb = [2, 6, 10, 14, 18] ba = Symbol('ba') bb = Symbol('bb') t = Symbol('t') fa = sum(Heaviside(t - _t) for _t in ta) * ba fb = sum(Heaviside(t - _t) for _t in tb) * bb N = fa + fb Nn = N.subs(ba, 1) Nn = Nn.subs(bb, -2) # Use Numpy heaviside for lambdify, with y=1 for x=0. 
modules = [{'Heaviside': lambda x, y: np.heaviside(x, 1)}, 'numpy'] neuronal_func = lambdify(t, Nn, modules=modules) tt = np.linspace(-1, 21, 1201) neuronal = neuronal_func(tt) plt.step(tt, neuronal) a = plt.gca() a.set_ylim([-5.5, 1.5]) a.set_ylabel('Neuronal (cumulative)') a.set_xlabel('Time') plt.show() nipy-0.6.1/doc/users/plots/random_amplitudes.py000066400000000000000000000010031470056100100216130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type design with events at times [0,4,8,12,16] and random amplitudes centered at [0,1.1,2.3,0.9,0.3]. """ import numpy as np import pylab for t, y in zip([0,4,8,12,16], [0,1.1,2.3,0.9,0.3]): pylab.plot([t,t], [y-0.1,y+0.1], c='r', linewidth=3) a = pylab.gca() a.set_yticks([0,2]) a.set_xlim([-1,18]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.6.1/doc/users/plots/random_amplitudes_times.py000066400000000000000000000011241470056100100230160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type design with events at random times centered at [0,4,8,12,16] and random amplitudes centered at [0,1.1,2.3,0.9,0.3]. """ import numpy as np import pylab for t, y in zip([0,4,8,12,16], [0,1.1,2.3,0.9,0.3]): dt = np.array([-0.5,0.5,0.5,-0.5]) dy = np.array([-0.1,-0.1,0.1,0.1]) pylab.fill(t+dt,y+dy, 'r') a = pylab.gca() a.set_yticks([0,2]) a.set_xlim([-1,18]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.6.1/doc/users/plots/sinusoidal.py000066400000000000000000000010121470056100100202560ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent a continuous stimulus having two features, Orientation and Contrast """ import numpy as np import pylab t = np.linspace(0,10,1000) o = np.sin(2*np.pi*(t+1)) * np.exp(-t/10) c = np.sin(2*np.pi*(t+0.2)/4) * np.exp(-t/12) pylab.plot(t, o, label='Orientation') pylab.plot(t, c+2.1, label='Contrast') pylab.legend() a = pylab.gca() a.set_yticks([]) a.set_xlabel('Time') nipy-0.6.1/doc/users/scipy_orientation.rst000066400000000000000000000044441470056100100206770ustar00rootroot00000000000000============================== Geography of the Scipy world ============================== in which we briefly describe the various components you are likely to come across when writing scientific Python software in general, and NIPY code in particular. Numpy ===== NumPy_ is the basic Python array-manipulation package. It allows you to create, slice and manipulate N-D arrays at near C speed. It also has basic arithmetical and mathematical functions (such as sum, mean, log, exp, sin and cos), matrix multiplication (``numpy.dot``), Fourier transforms (``numpy.fft``) and basic linear algebra (``numpy.linalg``). SciPy ===== Scipy_ is a large umbrella project that builds on Numpy (and depends on it). It includes a variety of high level science and engineering modules together as a single package. There are extended modules for linear algebra (including wrappers to BLAS and LAPACK), optimization, integration, sparse matrices, special functions, FFTs, signal and image processing, genetic algorithms, ODE solvers, and others.
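A tiny sketch of the kind of array operations the NumPy section above refers to (nothing here is nipy-specific):

>>> import numpy as np
>>> a = np.arange(6).reshape(2, 3)
>>> np.dot(a, a.T).shape
(2, 2)
>>> float(a.mean())
2.5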
Matplotlib ========== Matplotlib_ is a 2D plotting package that depends on NumPy_. It has a simple Matlab-like plotting syntax that makes it relatively easy to create good-looking plots, histograms and images with a small amount of code. As well as this simplified Matlab-like syntax, there is also a more powerful and flexible object-oriented interface (a short sketch of both interfaces follows at the end of this overview). Ipython ======= Ipython_ is an interactive shell for Python that has various features of the interactive shells of Matlab, Mathematica and R. It works particularly well with Matplotlib_, but is also an essential tool for interactive code development and code exploration. It contains libraries for interacting with parallel jobs on clusters or over several CPU cores in a fairly transparent way. Cython ====== Cython_ is a development language that allows you to write a combination of Python and C-like syntax to generate Python extensions. It is especially good for linking C libraries to Python in a readable way. It is also an excellent choice for optimization of Python code, because it allows you to drop down to C or C-like code at your bottlenecks without losing much of the readability of Python. Mayavi ====== Mayavi_ is a high-level Python interface to the VTK_ plotting libraries.
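To make the two Matplotlib interfaces mentioned above concrete, a minimal sketch (the data are arbitrary):

>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 100)
>>> lines = plt.plot(t, np.sin(2 * np.pi * t))   # Matlab-like state interface
>>> fig, ax = plt.subplots()                     # object-oriented interface
>>> lines = ax.plot(t, np.cos(2 * np.pi * t))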
.. include:: ../links_names.txt nipy-0.6.1/doc/users/tutorial.rst000066400000000000000000000002011470056100100167650ustar00rootroot00000000000000.. _tutorial-index: =========== Tutorials =========== .. toctree:: :maxdepth: 2 basic_io coordinate_map glm_spec nipy-0.6.1/examples/000077500000000000000000000000001470056100100143055ustar00rootroot00000000000000nipy-0.6.1/examples/.gitignore000066400000000000000000000002001470056100100162650ustar00rootroot00000000000000# Generated data files ammon_TO_anubis.npy labs/fmri_data.nii labs/localizer_paradigm.csv labs/need_data/results/ labs/zmap.nii nipy-0.6.1/examples/affine_registration.py000077500000000000000000000071151470056100100207060ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This script requires the nipy-data package to run. It is an example of inter-subject affine registration using two MR-T1 images from the sulcal 2000 database acquired at CEA, SHFJ, Orsay, France. The source is 'ammon' and the target is 'anubis'. Running it will result in a resampled ammon image being created in the current directory. """ import time from optparse import OptionParser import numpy as np from nipy import load_image, save_image from nipy.algorithms.registration import HistogramRegistration, resample from nipy.utils import example_data print('Scanning data directory...') # Input images are provided with the nipy-data package source = 'ammon' target = 'anubis' source_file = example_data.get_filename('neurospin', 'sulcal2000', 'nobias_' + source + '.nii.gz') target_file = example_data.get_filename('neurospin', 'sulcal2000', 'nobias_' + target + '.nii.gz') # Parse arguments parser = OptionParser(description=__doc__) doc_similarity = 'similarity measure: cc (correlation coefficient), \ cr (correlation ratio), crl1 (correlation ratio in L1 norm), \ mi (mutual information), nmi (normalized mutual information), \ pmi (Parzen mutual information), dpmi (discrete Parzen mutual \ information). Default is crl1.' doc_renormalize = 'similarity renormalization: 0 or 1. Default is 0.' doc_interp = 'interpolation method: tri (trilinear), pv (partial volume), \ rand (random). Default is pv.' doc_optimizer = 'optimization method: simplex, powell, steepest, cg, bfgs. \ Default is powell.' parser.add_option('-s', '--similarity', dest='similarity', help=doc_similarity) parser.add_option('-r', '--renormalize', dest='renormalize', help=doc_renormalize) parser.add_option('-i', '--interp', dest='interp', help=doc_interp) parser.add_option('-o', '--optimizer', dest='optimizer', help=doc_optimizer) opts, args = parser.parse_args() # Optional arguments similarity = 'crl1' renormalize = False interp = 'pv' optimizer = 'powell' if opts.similarity is not None: similarity = opts.similarity if opts.renormalize is not None: renormalize = bool(int(opts.renormalize)) if opts.interp is not None: interp = opts.interp if opts.optimizer is not None: optimizer = opts.optimizer # Print messages print(f'Source brain: {source}') print(f'Target brain: {target}') print(f'Similarity measure: {similarity}') print(f'Optimizer: {optimizer}') # Get data print('Fetching image data...') I = load_image(source_file) J = load_image(target_file) # Perform affine registration # The output is an array-like object such that # np.asarray(T) is a customary 4x4 matrix print('Setting up registration...') tic = time.time() R = HistogramRegistration(I, J, similarity=similarity, interp=interp, renormalize=renormalize) T = R.optimize('affine', optimizer=optimizer) toc = time.time() print(f' Registration time: {toc - tic:f} sec') # Resample source image print('Resampling source image...') tic = time.time() #It = resample2(I, J.coordmap, T.inv(), J.shape) It = resample(I, T.inv(), reference=J) toc = time.time() print(f' Resampling time: {toc - tic:f} sec') # Save resampled source outroot = source + '_TO_' + target outimg = outroot + '.nii.gz' print(f'Saving resampled source in: {outimg}') save_image(It, outimg) # Save transformation matrix outparams = outroot + '.npy' np.save(outparams, np.asarray(T)) nipy-0.6.1/examples/algorithms/000077500000000000000000000000001470056100100164565ustar00rootroot00000000000000nipy-0.6.1/examples/algorithms/README.txt000066400000000000000000000002151470056100100201520ustar00rootroot00000000000000################### Algorithms examples ################### Demos of mixture model and clustering algorithms. Examples require matplotlib. nipy-0.6.1/examples/algorithms/bayesian_gaussian_mixtures.py000077500000000000000000000043041470056100100244610ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of a demo that fits a Bayesian Gaussian Mixture Model (GMM) to a dataset. Variational Bayes and Gibbs estimation are successively run on the same dataset. Requires matplotlib Author : Bertrand Thirion, 2008-2010 """ print(__doc__) import numpy as np import numpy.random as nr try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.clustering import bgmm from nipy.algorithms.clustering.gmm import plot2D dim = 2 ############################################################################### # 1. generate a 3-component mixture x1 = nr.randn(25, dim) x2 = 3 + 2 * nr.randn(15, dim) x3 = np.repeat(np.array([-2, 2], ndmin=2), 10, 0) + 0.5 * nr.randn(10, dim) x = np.concatenate((x1, x2, x3)) ############################################################################### # 2.
fit the mixture with a bunch of possible models, using Variational Bayes krange = range(1, 10) be = - np.inf for k in krange: b = bgmm.VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) ek = float(b.evidence(x)) if ek > be: be = ek bestb = b print(k, 'classes, free energy:', ek) ############################################################################### # 3. plot the result z = bestb.map_label(x) plot2D(x, bestb, z, verbose=0) plt.title('Variational Bayes') ############################################################################### # 4. the same, with the Gibbs GMM algo niter = 1000 krange = range(1, 6) bbf = - np.inf for k in krange: b = bgmm.BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = bgmm.BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfk = bplugin.bayes_factor(x, pz.astype(np.int_), nperm=120) print(k, 'classes, evidence:', bfk) if bfk > bbf: bestk = k bbf = bfk bbgmm = bplugin z = bbgmm.map_label(x) plot2D(x, bbgmm, z, verbose=0) plt.title('Gibbs sampling') plt.show() nipy-0.6.1/examples/algorithms/clustering_comparisons.py000077500000000000000000000030411470056100100236250ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Simple demo that partitions a smooth field into 10 clusters. In most cases, Ward's clustering behaves best. Requires matplotlib Author: Bertrand Thirion, 2009 """ print(__doc__) import numpy as np import numpy.random as nr from scipy.ndimage import gaussian_filter try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.graph.field import Field dx = 50 dy = 50 dz = 1 nbseeds = 10 data = gaussian_filter( np.random.randn(dx, dy), 2) F = Field(dx * dy * dz) xyz = np.reshape(np.indices((dx, dy, dz)), (3, dx * dy * dz)).T.astype(np.int_) F.from_3d_grid(xyz, 6) F.set_field(data) seeds = np.argsort(nr.rand(F.V))[:nbseeds] seeds, label, J0 = F.geodesic_kmeans(seeds) wlabel, J1 = F.ward(nbseeds) seeds, label, J2 = F.geodesic_kmeans(seeds, label=wlabel.copy(), eps=1.e-7) print('Inertia values for the 3 algorithms: ') print('Geodesic k-means: ', J0, 'Wards: ', J1, 'Wards + gkm: ', J2) plt.figure(figsize=(8, 4)) plt.subplot(1, 3, 1) plt.imshow(np.reshape(data, (dx, dy)), interpolation='nearest') plt.title('Input data') plt.subplot(1, 3, 2) plt.imshow(np.reshape(wlabel, (dx, dy)), interpolation='nearest') plt.title('Ward clustering \n into 10 components') plt.subplot(1, 3, 3) plt.imshow(np.reshape(label, (dx, dy)), interpolation='nearest') plt.title('geodesic kmeans clust. \n into 10 components') plt.show() nipy-0.6.1/examples/algorithms/gaussian_mixture_models.py000077500000000000000000000022021470056100100237610ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of a demo that fits a Gaussian Mixture Model (GMM) to a dataset The possible number of clusters is in the [1,10] range The proposed algorithm correctly selects a solution with 2 or 3 classes Requires matplotlib Author : Bertrand Thirion, 2008-2009 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.clustering import gmm dim = 2 # 1. 
generate a 3-component mixture x1 = np.random.randn(100, dim) x2 = 3 + 2 * np.random.randn(50, dim) x3 = np.repeat(np.array([- 2, 2], ndmin=2), 30, 0) \ + 0.5 * np.random.randn(30, dim) x = np.concatenate((x1, x2, x3)) # 2. fit the mixture with a bunch of possible models krange = range(1, 5) lgmm = gmm.best_fitting_GMM(x, krange, prec_type='diag', niter=100, delta=1.e-4, ninit=1, verbose=0) # 3. plot the result z = lgmm.map_label(x) gmm.plot2D(x, lgmm, z, verbose=0) plt.show() nipy-0.6.1/examples/algorithms/mixed_effects.py000077500000000000000000000035351470056100100216460ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This example illustrates the impact of using a mixed-effects model for the detection of effects, when the first-level variance is known: if the first-level variance is very variable across observations, then taking it into account gives more reliable detections, as seen in an ROC curve. Requires matplotlib. Author: Bertrand Thirion, 2012 """ print(__doc__) import matplotlib.pyplot as plt import numpy as np from nipy.algorithms.statistics.mixed_effects_stat import ( generate_data, one_sample_ttest, t_stat, ) # generate the data N, P = 15, 500 V1 = np.random.randn(N, P) ** 2 effects = 0.5 * (np.random.randn(P) > 0) Y = generate_data(np.ones(N), effects, .25, V1) # compute the statistics T1 = one_sample_ttest(Y, V1, n_iter=5) T1 = [T1[effects == x] for x in np.unique(effects)] T2 = [t_stat(Y)[effects == x] for x in np.unique(effects)] # Derive ROC curves ROC1 = np.array([np.sum(T1[1] > - x) for x in np.sort(- T1[0])])\ * 1. / T1[1].size ROC2 = np.array([np.sum(T2[1] > - x) for x in np.sort(- T2[0])])\ * 1.
/ T2[1].size # make a figure FIG = plt.figure(figsize=(10, 5)) AX = FIG.add_subplot(121) AX.plot(np.linspace(0, 1, len(ROC1)), ROC1, label='mixed effects') AX.plot(np.linspace(0, 1, len(ROC2)), ROC2, label='t test') AX.set_xlabel('false positives') AX.set_ylabel('true positives') AX.set_title('ROC curves for the detection of effects', fontsize=12) AX.legend(loc='lower right') AX = FIG.add_subplot(122) AX.boxplot(T1, positions=[-0.1, .9]) AX.boxplot(T2, positions=[0.1, 1.1]) AX.set_xticks([0, 1]) AX.set_xlabel('simulated effects') AX.set_ylabel('decision statistic') AX.set_title('left: mixed effects model, \n right: standard t test', fontsize=12) plt.show() nipy-0.6.1/examples/algorithms/ward_clustering.py000077500000000000000000000037111470056100100222310ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Demo of Ward clustering on a graph: various ways of forming clusters and a dendrogram. Requires matplotlib """ print(__doc__) import numpy as np from numpy.random import rand, randn try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.clustering.hierarchical_clustering import ward from nipy.algorithms.graph import knn # n = number of points, k = number of nearest neighbours n = 100 k = 5 # Set verbose to True to see more printed output verbose = False X = randn(n, 2) X[:int(np.ceil(n / 3))] += 3 G = knn(X, k) tree = ward(G, X, verbose) threshold = .5 * n u = tree.partition(threshold) plt.figure(figsize=(12, 6)) plt.subplot(1, 3, 1) for i in range(u.max()+1): plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand())) plt.axis('tight') plt.axis('off') plt.title(f'clustering into clusters \n of inertia < {threshold:g}') u = tree.split(k) plt.subplot(1, 3, 2) for e in range(G.E): plt.plot([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]], [X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]], 'k') for i in range(u.max() + 1): plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand())) plt.axis('tight') plt.axis('off') plt.title('clustering into 5 clusters') nl = np.sum(tree.isleaf()) validleaves = np.zeros(n) validleaves[:int(np.ceil(n / 4))] = 1 valid = np.zeros(tree.V, 'bool') valid[tree.isleaf()] = validleaves.astype('bool') nv = np.sum(validleaves) nv0 = 0 while nv > nv0: nv0 = nv for v in range(tree.V): if valid[v]: valid[tree.parents[v]] = 1 nv = np.sum(valid) ax = plt.subplot(1, 3, 3) ax = tree.plot(ax) ax.set_title('Dendrogram') ax.set_visible(True) plt.show() if verbose: print('List of sub trees') print(tree.list_of_subtrees())
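The k-nearest-neighbour graph used in the demo above is easy to build on its own; a small sketch (the sizes are arbitrary):

import numpy as np
from nipy.algorithms.graph import knn

# 20 random points in the plane, each linked to its 3 nearest neighbours
X = np.random.randn(20, 2)
G = knn(X, 3)
print(G.V, G.E)   # number of vertices and edges in the graph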
""" __doc__ = USAGE import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.glm import FMRILinearModel from nipy.utils import example_data # Optional argument - default value 1, 0, 0, 0 nargs = len(sys.argv) if nargs not in (1, 2, 5): print(USAGE) exit(1) if nargs == 1: # default no-argument case cvect = [1, 0, 0, 0] else: if nargs == 2: # contrast as one string args = sys.argv[1].split(',') elif nargs == 5: # contrast as sequence of strings args = [arg.replace(',', '') for arg in sys.argv[1:]] if len(args) != 4: print(USAGE) exit(1) try: cvect = [float(arg) for arg in args] except ValueError: print(USAGE) exit(1) # Input files fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') # Compute the required contrast print('Computing test contrast image...') n_regressors = [np.load(f)['X'].shape[1] for f in design_files] con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors] z_map, = multi_session_model.contrast(con) # Show Z-map image mean_map = multi_session_model.means[0] plot_map(z_map.get_fdata(), z_map.affine, anat=mean_map.get_fdata(), anat_affine=mean_map.affine, cmap=cm.cold_hot, threshold=2.5, black_bg=True) plt.show() nipy-0.6.1/examples/core/000077500000000000000000000000001470056100100152355ustar00rootroot00000000000000nipy-0.6.1/examples/core/parcel_generator.py000077500000000000000000000022101470056100100211210ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example showing how to use the parcel generator. We load an image with ROI definitions and calculate the number of voxels in each ROI. """ print(__doc__) from os.path import dirname from os.path import join as pjoin import nipy from nipy.core.utils.generators import parcels OUR_PATH = dirname(__file__) DATA_PATH = pjoin(OUR_PATH, '..', 'data') BG_IMAGE_FNAME = pjoin(DATA_PATH, 'mni_basal_ganglia.nii.gz') bg_img = nipy.load_image(BG_IMAGE_FNAME) bg_data = bg_img.get_fdata() """ I happen to know that the image has these codes: 14 - Left striatum 16 - Right striatum 39 - Left caudate 53 - Right caudate All the other voxels are zero, I don't want those. """ print("Number of voxels for L, R striatum; L, R caudate") for mask in parcels(bg_data, exclude=(0,)): print(mask.sum()) """ Given we know the codes we can also give them directly """ print("Again with the number of voxels for L, R striatum; L, R caudate") for mask in parcels(bg_data, labels=(14, 16, 39, 53)): print(mask.sum()) nipy-0.6.1/examples/create_tempimage.py000077500000000000000000000016231470056100100201570ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """This example shows how to create a temporary image to use during processing. The array is filled with zeros. 
""" import numpy as np from nipy import load_image, save_image from nipy.core.api import Image, vox2mni # create an array of zeros, the shape of your data array zero_array = np.zeros((91,109,91)) # create an image from our array. The image will be in MNI space img = Image(zero_array, vox2mni(np.diag([2, 2, 2, 1]))) # save the image to a file newimg = save_image(img, 'tempimage.nii.gz') # Example of creating a temporary image file from an existing image with a # matching coordinate map. img = load_image('tempimage.nii.gz') zeroarray = np.zeros(img.shape) zeroimg = Image(zeroarray, img.coordmap) newimg = save_image(zeroimg, 'another_tempimage.nii.gz') nipy-0.6.1/examples/data/000077500000000000000000000000001470056100100152165ustar00rootroot00000000000000nipy-0.6.1/examples/data/README_mni_basal_ganglia.rst000066400000000000000000000040111470056100100223700ustar00rootroot00000000000000############################################# README for ``mni_basal_ganglia.nii.gz`` image ############################################# I extracted these basal ganglia definitions from the MNI ICBM 2009c Nonlinear Symmetric template at 1×1x1 mm resolution. At the time, the templates were available here: http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009 The script to extract the data was:: from os.path import join as pjoin import numpy as np import nibabel as nib atlas_fname = pjoin('mni_icbm152_nlin_sym_09c', 'mni_icbm152_t1_tal_nlin_sym_09a_atlas', 'AtlasGrey.mnc') atlas_img = nib.load(atlas_fname) # Data is in fact uint8, but with trivial float scaling data = np.array(atlas_img.dataobj).astype(np.uint8) bg_data = np.zeros_like(data) for code in (14, 16, 39, 53): # LR striatum, LR caudate in_mask = data == code bg_data[in_mask] = code bg_img = nib.Nifti1Image(bg_data, atlas_img.affine) bg_img = nib.as_closest_canonical(bg_img) nib.save(bg_img, 'basal_ganglia.nii.gz') ********** Data codes ********** These are the values in the image: * 14 - Left striatum * 16 - Right striatum * 39 - Left caudate * 53 - Right caudate Everything else is zero. ******* License ******* Contents of the file ``COPYING`` in the template archive: Copyright (C) 1993-2004 Louis Collins, McConnell Brain Imaging Centre, Montreal Neurological Institute, McGill University. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies. The authors and McGill University make no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. The authors are not responsible for any data loss, equipment damage, property loss, or injury to subjects or patients resulting from the use or misuse of this software package. 
nipy-0.6.1/examples/data/mni_basal_ganglia.nii.gz [binary gzip image data omitted: not representable as text]
[ok&~H4bvm{ţBGߪ3To+6Mȅ(?&ޤh;D[jPZC}rg2!Ѣ;b}M^Yqt]G^Bl/SIwP%v~JN*Tpҝ7_%&hY%& r(̔pCI%)fC"DIXQO !w!'ӥXbsR,`N!V8Ra @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @Ij{{W=R‡XOdNkiuY_~) ?P{XJ*!ĹOзwK_>nv'!A'h *1uO'!on kUkƒnA~}A_nIΎ8Tbv>B̲pfXiJ,LB}w!CB,N+ +3iSq-B\h @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @n.𬶷_NCQGݩkˢkB;5|ix GѷR)BB; qDO|qޡ_ú-ġݱfi3IIoN@%N|[Lv*qެ ?(Npxd~!Dѧxba<[Xi]%C!3EbCu-VQX7D @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @HYmoUmoϺ:ěu!Nѝ*,n&8߾]+ϧ~Ip#WbiQKGN_zJT C/UC9J|w*q(7S//m?C"DItXOO'Sζ+gk2:ʸ! @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @f[l\:(ĥɛx&ƻm7"J-nUE-XB\65a*T~]~JjEx}%g!.'"@)J(pvZai?(V0TbeuCB$ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @\(𬶷g]vBM:'H%Nm%v$/l!>.Y'jd޷,l!Ѣ{{+⍻G +3 7QQR:{TbݢCvX1Tbe&?be @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @(7_K) B]&8[ħR'ⓧSSb-EA!ɭoQn.(o,T g(Oـsbe @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ >> drift = design.natural_spline(t, [volume_times.mean()]) # Stack all the designs, keeping the new contrasts which has the same keys # as cons_exper, but its values are arrays with 15 columns, with the # non-zero entries matching the columns of X corresponding to X_exper X, cons = design.stack_designs((X_exper, cons_exper), (drift, {})) # Sanity check: delete any non-estimable contrasts for k in cons: if not isestimable(cons[k], X): del(cons[k]) warnings.warn(f"contrast {k} not estimable for this run") # The default contrasts are all t-statistics. We may want to output # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the # two coefficients, one for each HRF in delay.spectral # We reproduce the same contrasts as in the data base # outputting an F using both HRFs, as well as the # t using only the first HRF for obj1, obj2 in [('face', 'scrambled'), ('house', 'scrambled'), ('chair', 'scrambled'), ('face', 'house')]: cons[f'{obj1}_vs_{obj2}_F'] = \ np.vstack([cons[f'object_{obj1}_0'] - cons[f'object_{obj2}_0'], cons[f'object_{obj1}_1'] - cons[f'object_{obj2}_1']]) cons[f'{obj1}_vs_{obj2}_t'] = (cons[f'object_{obj1}_0'] - cons[f'object_{obj2}_0']) #---------------------------------------------------------------------- # Data loading #---------------------------------------------------------------------- # Load in the fMRI data, saving it as an array. It is transposed to have # time as the first dimension, i.e. fmri[t] gives the t-th volume. fmri_im = futil.get_fmri(path_info) # an Image fmri_im = rollimg(fmri_im, 't') fmri = fmri_im.get_fdata() # now, it's an ndarray nvol, volshape = fmri.shape[0], fmri.shape[1:] nx, sliceshape = volshape[0], volshape[1:] #---------------------------------------------------------------------- # Model fit #---------------------------------------------------------------------- # The model is a two-stage model, the first stage being an OLS (ordinary # least squares) fit, whose residuals are used to estimate an AR(1) # parameter for each voxel. m = OLSModel(X) ar1 = np.zeros(volshape) # Fit the model, storing an estimate of an AR(1) parameter at each voxel for s in range(nx): d = np.array(fmri[:,s]) flatd = d.reshape((d.shape[0], -1)) result = m.fit(flatd) ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) / (result.resid**2).sum(0)).reshape(sliceshape) # We round ar1 to nearest one-hundredth and group voxels by their rounded # ar1 value, fitting an AR(1) model to each batch of voxels. # XXX smooth here? # ar1 = smooth(ar1, 8.0) ar1 *= 100 ar1 = ar1.astype(np.int_) / 100. # We split the contrasts into F-tests and t-tests. 
# XXX helper function should do this fcons = {}; tcons = {} for n, v in cons.items(): v = np.squeeze(v) if v.ndim == 1: tcons[n] = v else: fcons[n] = v # Setup a dictionary to hold all the output # XXX ideally these would be memmap'ed Image instances output = {} for n in tcons: tempdict = {} for v in ['sd', 't', 'effect']: tempdict[v] = np.memmap(NamedTemporaryFile(prefix=f'{n}{v}.nii'), dtype=np.float64, shape=volshape, mode='w+') output[n] = tempdict for n in fcons: output[n] = np.memmap(NamedTemporaryFile(prefix=f'{n}{v}.nii'), dtype=np.float64, shape=volshape, mode='w+') # Loop over the unique values of ar1 for val in np.unique(ar1): armask = np.equal(ar1, val) m = ARModel(X, val) d = fmri[:,armask] results = m.fit(d) # Output the results for each contrast for n in tcons: resT = results.Tcontrast(tcons[n]) output[n]['sd'][armask] = resT.sd output[n]['t'][armask] = resT.t output[n]['effect'][armask] = resT.effect for n in fcons: output[n][armask] = results.Fcontrast(fcons[n]).F # Dump output to disk odir = futil.output_dir(path_info,tcons,fcons) # The coordmap for a single volume in the time series vol0_map = fmri_im[0].coordmap for n in tcons: for v in ['t', 'sd', 'effect']: im = Image(output[n][v], vol0_map) save_image(im, pjoin(odir, n, f'{v}.nii')) for n in fcons: im = Image(output[n], vol0_map) save_image(im, pjoin(odir, n, "F.nii")) def fixed_effects(subj, design): """ Fixed effects (within subject) for OpenfMRI ds105 model Finds run by run estimated model results, creates fixed effects results image per subject. Parameters ---------- subj : int subject number 1..6 inclusive design : {'standard'} design type """ # First, find all the effect and standard deviation images # for the subject and this design type path_dict = futil.path_info_design(subj, design) rootdir = path_dict['rootdir'] # The output directory fixdir = pjoin(rootdir, "fixed") # Fetch results images from run estimations results = futil.results_table(path_dict) # Get our hands on the relevant coordmap to save our results coordmap = futil.load_image_ds105("_%02d" % subj, "wanatomical.nii").coordmap # Compute the "fixed" effects for each type of contrast for con in results: fixed_effect = 0 fixed_var = 0 for effect, sd in results[con]: effect = load_image(effect).get_fdata() sd = load_image(sd).get_fdata() var = sd ** 2 # The optimal, in terms of minimum variance, combination of the # effects has weights 1 / var # # XXX regions with 0 variance are set to 0 # XXX do we want this or np.nan? ivar = np.nan_to_num(1. / var) fixed_effect += effect * ivar fixed_var += ivar # Now, compute the fixed effects variance and t statistic fixed_sd = np.sqrt(fixed_var) isd = np.nan_to_num(1. / fixed_sd) fixed_t = fixed_effect * isd # Save the results odir = futil.ensure_dir(fixdir, con) for a, n in zip([fixed_effect, fixed_sd, fixed_t], ['effect', 'sd', 't']): im = api.Image(a, copy(coordmap)) save_image(im, pjoin(odir, f'{n}.nii')) def group_analysis(design, contrast): """ Compute group analysis effect, t, sd for `design` and `contrast` Saves to disk in 'group' analysis directory Parameters ---------- design : {'block', 'event'} contrast : str contrast name """ array = np.array # shorthand # Directory where output will be written odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast) # Which subjects have this (contrast, design) pair? 
subj_con_dirs = futil.subj_des_con_dirs(design, contrast) if len(subj_con_dirs) == 0: raise ValueError(f'No subjects for {design}, {contrast}') # Assemble effects and sds into 4D arrays sds = [] Ys = [] for s in subj_con_dirs: sd_img = load_image(pjoin(s, "sd.nii")) effect_img = load_image(pjoin(s, "effect.nii")) sds.append(sd_img.get_fdata()) Ys.append(effect_img.get_fdata()) sd = array(sds) Y = array(Ys) # This function estimates the ratio of the fixed effects variance # (sum(1/sd**2, 0)) to the estimated random effects variance # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance. # The EM algorithm used is described in: # # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H., # Morales, F., Evans, A.C. (2002). \'A general statistical # analysis for fMRI data\'. NeuroImage, 15:1-15 varest = onesample.estimate_varatio(Y, sd) random_var = varest['random'] # XXX - if we have a smoother, use # random_var = varest['fixed'] * smooth(varest['ratio']) # Having estimated the random effects variance (and possibly smoothed it), # the corresponding estimate of the effect and its variance is computed and # saved. # This is the coordmap we will use coordmap = futil.load_image_ds105("fiac_00","wanatomical.nii").coordmap adjusted_var = sd**2 + random_var adjusted_sd = np.sqrt(adjusted_var) results = onesample.estimate_mean(Y, adjusted_sd) for n in ['effect', 'sd', 't']: im = api.Image(results[n], copy(coordmap)) save_image(im, pjoin(odir, f"{n}.nii")) def group_analysis_signs(design, contrast, mask, signs=None): """ Refit the EM model with a vector of signs. Used in the permutation tests. Returns the maximum of the T-statistic within mask Parameters ---------- design: one of 'block', 'event' contrast: str name of contrast to estimate mask : ``Image`` instance or array-like image containing mask, or array-like signs: ndarray, optional Defaults to np.ones. Should have shape (*,nsubj) where nsubj is the number of effects combined in the group analysis. Returns ------- minT: np.ndarray, minima of T statistic within mask, one for each vector of signs maxT: np.ndarray, maxima of T statistic within mask, one for each vector of signs """ if api.is_image(mask): maska = mask.get_fdata() else: maska = np.asarray(mask) maska = maska.astype(np.bool_) # Which subjects have this (contrast, design) pair? subj_con_dirs = futil.subj_des_con_dirs(design, contrast) # Assemble effects and sds into 4D arrays sds = [] Ys = [] for s in subj_con_dirs: sd_img = load_image(pjoin(s, "sd.nii")) effect_img = load_image(pjoin(s, "effect.nii")) sds.append(sd_img.get_fdata()[maska]) Ys.append(effect_img.get_fdata()[maska]) sd = np.array(sds) Y = np.array(Ys) if signs is None: signs = np.ones((1, Y.shape[0])) maxT = np.empty(signs.shape[0]) minT = np.empty(signs.shape[0]) for i, sign in enumerate(signs): signY = sign[:,np.newaxis] * Y varest = onesample.estimate_varatio(signY, sd) random_var = varest['random'] adjusted_var = sd**2 + random_var adjusted_sd = np.sqrt(adjusted_var) results = onesample.estimate_mean(Y, adjusted_sd) T = results['t'] minT[i], maxT[i] = np.nanmin(T), np.nanmax(T) return minT, maxT def permutation_test(design, contrast, mask=GROUP_MASK, nsample=1000): """ Perform a permutation (sign) test for a given design type and contrast. It is a Monte Carlo test because we only sample nsample possible sign arrays. 
    Parameters
    ----------
    design : {'standard'}
    contrast : str
        name of contrast to estimate
    mask : ``Image`` instance or array-like, optional
        image containing mask, or array-like
    nsample : int, optional
        number of permutations

    Returns
    -------
    min_vals: np.ndarray
    max_vals: np.ndarray
    """
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    nsubj = len(subj_con_dirs)
    if nsubj == 0:
        raise ValueError(f'No subjects have {design}, {contrast}')
    signs = 2*np.greater(np.random.sample(size=(nsample, nsubj)), 0.5) - 1
    min_vals, max_vals = group_analysis_signs(design, contrast, mask, signs)
    return min_vals, max_vals


def run_run_models(subject_nos=SUBJECTS, run_nos = RUNS):
    """ Simple serial run of all the within-run models """
    for subj in subject_nos:
        for run in run_nos:
            try:
                run_model(subj, run)
            except OSError:
                print('Skipping subject %d, run %d' % (subj, run))


def run_fixed_models(subject_nos=SUBJECTS, designs=DESIGNS):
    """ Simple serial run of all the within-subject models """
    for subj in subject_nos:
        for design in designs:
            try:
                fixed_effects(subj, design)
            except OSError:
                print('Skipping subject %d, design %s' % (subj, design))


def run_group_models(designs=DESIGNS, contrasts=CONTRASTS):
    """ Simple serial run of all the across-subject models """
    for design in designs:
        for contrast in contrasts:
            group_analysis(design, contrast)


if __name__ == '__main__':
    pass
    # Sanity check while debugging
    #permutation_test('standard', 'house_vs_scrambled_t', mask=TINY_MASK, nsample=3)
nipy-0.6.1/examples/ds105/ds105_util.py000066400000000000000000000214401470056100100174050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Support utilities for ds105 example, mostly path management.

The purpose of separating these is to keep the main example code as readable
as possible and focused on the experimental modeling and analysis, rather than
on local file management issues.

Requires pandas
"""

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Stdlib
import os
from os import listdir, makedirs
from os.path import abspath, exists, isdir, splitext
from os.path import join as pjoin

# Third party
import numpy as np
import pandas as pd

# From NIPY
from nipy.io.api import load_image


def csv2rec(fname):
    return pd.read_csv(fname).to_records()

#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

# We assume that there is a directory holding the data and it's local to this
# code.  Users can either keep a copy here or a symlink to the real location
# on disk of the data.
DATADIR = 'ds105_data'

# Sanity check
if not os.path.isdir(DATADIR):
    e=f"The data directory {DATADIR} must exist and contain the ds105 data."
    raise OSError(e)

#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------

# Path management utilities
def load_image_ds105(*path):
    """Return a NIPY image from a set of path components.
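
    Examples
    --------
    A hypothetical call, assuming the local ds105_data directory is in place
    (so the doctest is skipped):

    >>> msk = load_image_ds105('group', 'mask.nii')  # doctest: +SKIP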
""" return load_image(pjoin(DATADIR, *path)) def subj_des_con_dirs(design, contrast, subjects=range(1,7)): """Return a list of subject directories with this `design` and `contrast` Parameters ---------- design : {'standard'} contrast : str subjects : list, optional which subjects Returns ------- con_dirs : list list of directories matching `design` and `contrast` """ rootdir = DATADIR con_dirs = [] for s in subjects: f = pjoin(rootdir, "sub%03d" % s, "model", design, "fixed", contrast) if isdir(f): con_dirs.append(f) return con_dirs def path_info_run(subj, run, design='standard'): """Construct path information dict for current subject/run. Parameters ---------- subj : int subject number (1..6 inclusive) run : int run number (1..12 inclusive). design : str, optional which design to use, defaults to 'standard' Returns ------- path_dict : dict a dict with all the necessary path-related keys, including 'rootdir', and 'design', where 'design' can have values 'event' or 'block' depending on which type of run this was for subject no `subj` and run no `run` """ path_dict = {'subj': subj, 'run': run, 'design':design} rootdir = pjoin(DATADIR, "sub%(subj)03d", "model", "%(design)s") % path_dict path_dict['rootdir'] = rootdir path_dict['fsldir'] = pjoin(DATADIR, "sub%(subj)03d", "model", "model001") % path_dict return path_dict def path_info_design(subj, design): """Construct path information dict for subject and design. Parameters ---------- subj : int subject number (1..6 inclusive) design : {'standard'} type of design Returns ------- path_dict : dict having keys 'rootdir', 'subj', 'design' """ path_dict = {'subj': subj, 'design': design} rootdir = pjoin(DATADIR, "sub%(subj)03d", "model", "%(design)s") % path_dict path_dict['rootdir'] = rootdir path_dict['fsldir'] = pjoin(DATADIR, "sub%(subj)03d", "model", "model001") % path_dict return path_dict def results_table(path_dict): """ Return precalculated results images for subject info in `path_dict` Parameters ---------- path_dict : dict containing key 'rootdir' Returns ------- rtab : dict dict with keys given by run directories for this subject, values being a list with filenames of effect and sd images. """ # Which runs correspond to this design type? rootdir = path_dict['rootdir'] runs = filter(lambda f: isdir(pjoin(rootdir, f)), ['results_run%03d' % i for i in range(1,13)] ) # Find out which contrasts have t-statistics, # storing the filenames for reading below results = {} for rundir in runs: rundir = pjoin(rootdir, rundir) for condir in listdir(rundir): for stat in ['sd', 'effect']: fname_effect = abspath(pjoin(rundir, condir, 'effect.nii')) fname_sd = abspath(pjoin(rundir, condir, 'sd.nii')) if exists(fname_effect) and exists(fname_sd): results.setdefault(condir, []).append([fname_effect, fname_sd]) return results def get_experiment(path_dict): """Get the record arrays for the experimental design. Parameters ---------- path_dict : dict containing key 'rootdir', 'run', 'subj' Returns ------- experiment, initial : Two record arrays. """ # The following two lines read in the .csv files # and return recarrays, with fields # experiment: ['time', 'sentence', 'speaker'] # initial: ['time', 'initial'] rootdir = path_dict['rootdir'] if not exists(pjoin(rootdir, "experiment_run%(run)03d.csv") % path_dict): e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict raise OSError(e) experiment = csv2rec(pjoin(rootdir, "experiment_run%(run)03d.csv") % path_dict) return experiment def get_fmri(path_dict): """Get the images for a given subject/run. 
    Parameters
    ----------
    path_dict : dict
        containing key 'fsldir', 'run'

    Returns
    -------
    fmri_im : NIPY image
        4D fMRI image for this subject / run
    """
    fmri_im = load_image(
        pjoin("%(fsldir)s/task001_run%(run)03d.feat/filtered_func_data.nii.gz") % path_dict)
    return fmri_im


def ensure_dir(*path):
    """Ensure a directory exists, making it if necessary.

    Returns the full path."""
    dirpath = pjoin(*path)
    if not isdir(dirpath):
        makedirs(dirpath)
    return dirpath


def output_dir(path_dict, tcons, fcons):
    """Get (and make if necessary) directory to write output into.

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir', 'run'
    tcons : sequence of str
        t contrasts
    fcons : sequence of str
        F contrasts
    """
    rootdir = path_dict['rootdir']
    odir = pjoin(rootdir, "results_run%(run)03d" % path_dict)
    ensure_dir(odir)
    for n in tcons:
        ensure_dir(odir,n)
    for n in fcons:
        ensure_dir(odir,n)
    return odir


def compare_results(subj, run, other_root, mask_fname):
    """ Find and compare calculated results images from a previous run

    This function checks that the results in another directory, from a
    previous run of this same analysis, are similar in the sense of numpy
    ``allclose`` within a brain mask.

    Parameters
    ----------
    subj : int
        subject number (1..6)
    run : int
        run number (1..12)
    other_root : str
        path to previous run estimation
    mask_fname:
        path to a mask image defining area in which to compare differences
    """
    # Get information for this subject and run
    path_dict = path_info_run(subj, run)
    # Get mask
    msk = load_image(mask_fname).get_fdata().copy().astype(bool)
    # Get results directories for this run
    rootdir = path_dict['rootdir']
    res_dir = pjoin(rootdir, 'results_run%03d' % run)
    if not isdir(res_dir):
        return
    for dirpath, dirnames, filenames in os.walk(res_dir):
        for fname in filenames:
            froot, ext = splitext(fname)
            if froot in ('effect', 'sd', 'F', 't'):
                this_fname = pjoin(dirpath, fname)
                other_fname = this_fname.replace(DATADIR, other_root)
                if not exists(other_fname):
                    print(this_fname, 'present but ', other_fname, 'missing')
                    continue
                this_arr = load_image(this_fname).get_fdata()
                other_arr = load_image(other_fname).get_fdata()
                ok = np.allclose(this_arr[msk], other_arr[msk])
                if not ok and froot in ('effect', 'sd', 't'):
                    # Maybe a sign flip
                    ok = np.allclose(this_arr[msk], -other_arr[msk])
                if not ok:
                    print('Difference between', this_fname, other_fname)


def compare_all(other_root, mask_fname):
    """ Run results comparison for all subjects and runs """
    for subj in range(1,7):
        for run in range(1, 13):
            compare_results(subj, run, other_root, mask_fname)
nipy-0.6.1/examples/ds105/parallel_run.py000066400000000000000000000066411470056100100202020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Script to run the main analyses in parallel, using the IPython machinery.

See ``ds105_example.py``.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os

import numpy as np

from IPython import parallel

#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------

_client = None
def setup_client():
    """Get a Client and initialize it.

    This assumes that all nodes see a shared filesystem.
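
    A minimal sketch of the assumed setup -- the engine count here is
    arbitrary, not a requirement of this script::

        $ ipcluster start -n 4    # start a controller and engines first

    after which the ``fit*`` functions below can be called from IPython.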
""" global _client if _client is None: _client = parallel.Client() mydir = os.path.split(os.path.abspath(__file__))[0] def cd(path): import os os.chdir(path) _client[:].apply_sync(cd, mydir) return _client def getruns(): for i in range(1,7): for j in range(1,13): yield i, j def getvals(): for con in ['house_vs_scrambled_t', 'chair_vs_scrambled_t', 'face_vs_scrambled_t', 'face_vs_house_t']: for design in ['standard']: yield design, con #----------------------------------------------------------------------------- # Main analysis functions #----------------------------------------------------------------------------- def fitruns(): """Run the basic model fit.""" rc = setup_client() view = rc.load_balanced_view() i_s, j_s = zip(*getruns()) def _fit(subj, run): import fiac_example try: return fiac_example.run_model(subj, run) except OSError: pass return view.map(_fit, i_s, j_s) def fitfixed(): """Run the fixed effects analysis for all subjects.""" rc = setup_client() view = rc.load_balanced_view() subjects = range(16) def _fit(subject): import fiac_example try: fiac_example.fixed_effects(subject, "block") except OSError: pass try: fiac_example.fixed_effects(subject, "event") except OSError: pass return view.map(_fit, subjects) def fitgroup(): """Run the group analysis""" rc = setup_client() view = rc.load_balanced_view() d_s, c_s = zip(*getvals()) def _fit(d, c): import fiac_example return fiac_example.group_analysis(d, c) return view.map(_fit, d_s, c_s) def run_permute_test(design, contrast, nsample=1000): rc = setup_client() dview = rc[:] nnod = len(dview) # Samples per node. Round up ns_nod = np.ceil(nsample / float(nnod)) def _run_test(n, des, con): import fiac_example from fiac_example import GROUP_MASK min_vals, max_vals = fiac_example.permutation_test(des, con, GROUP_MASK, n) return min_vals, max_vals ar = dview.apply_async(_run_test, ns_nod, design, contrast) min_vals, max_vals = zip(*list(ar)) return np.concatenate(min_vals), np.concatenate(max_vals) #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': pass nipy-0.6.1/examples/ds105/view_contrasts_3d.py000077500000000000000000000047421470056100100211650ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """A quick and dirty example of using Mayavi to overlay anatomy and activation. 
""" #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import numpy as np try: from mayavi import mlab except ImportError: try: from enthought.mayavi import mlab except ImportError: raise RuntimeError('Need mayavi for this module') from ds105_util import load_image_ds105 #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- MASK = load_image_fiac('group', 'mask.nii') AVGANAT = load_image_fiac('group', 'avganat.nii') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def view_thresholdedT(design, contrast, threshold, inequality=np.greater): """ A mayavi isosurface view of thresholded t-statistics Parameters ---------- design : {'standard'} contrast : str threshold : float inequality : {np.greater, np.less}, optional """ maska = np.asarray(MASK) tmap = np.array(load_image_ds105('group', design, contrast, 't.nii')) test = inequality(tmap, threshold) tval = np.zeros(tmap.shape) tval[test] = tmap[test] # XXX make the array axes agree with mayavi2 avganata = np.array(AVGANAT) avganat_iso = mlab.contour3d(avganata * maska, opacity=0.3, contours=[3600], color=(0.8,0.8,0.8)) avganat_iso.actor.property.backface_culling = True avganat_iso.actor.property.ambient = 0.3 tval_iso = mlab.contour3d(tval * MASK, color=(0.8,0.3,0.3), contours=[threshold]) return avganat_iso, tval_iso #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': # A simple example use case design = 'standard' contrast = 'house_vs_scrambled_t' threshold = 0.3 print('Starting thresholded view with:') print('Design=', design, 'contrast=', contrast, 'threshold=', threshold) view_thresholdedT(design, contrast, threshold) nipy-0.6.1/examples/fiac/000077500000000000000000000000001470056100100152075ustar00rootroot00000000000000nipy-0.6.1/examples/fiac/README.txt000066400000000000000000000025761470056100100167170ustar00rootroot00000000000000====================================== Analyzing the FIAC dataset with NIPY ====================================== This directory contains a set of scripts to complete an analysis of the Functional Image Analysis Contest (FIAC) dataset. The FIAC was conducted as part of the 11th Annual Meeting of the Organization for Human Brain Mapping (Toronto, 2005). For more information on the dataset, see [1]. In order to run the examples in this directory, you will need a copy of the curated data. We haven't yet succeeded in licensing this data for full release. Please see the latest version of this file on github for the current link to the data: https://github.com/nipy/nipy/blob/master/examples/fiac/README.txt ToDo ==== - Provide the raw data repository, with design csv files. - Integrate the scripts for curating the raw data. - Separate input from output directories. - Change ':' in contrast directory names to - or something else, as ':' is not a valid character in directory names under Windows and OSX. .. _here: http://FIXME/MISSING/DATA/ACCESS .. [1] Dehaene-Lambertz G, Dehaene S, Anton JL, Campagne A, Ciuciu P, Dehaene G, Denghien I, Jobert A, LeBihan D, Sigman M, Pallier C, Poline JB. 
   Functional segregation of cortical language areas by sentence repetition.
   Hum Brain Mapp. 2006;27:360–371.
   http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2653076#R11
nipy-0.6.1/examples/fiac/fiac_example.py000066400000000000000000000451221470056100100202020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Example analyzing the FIAC dataset with NIPY.

* Single run models with per-voxel AR(1).
* Cross-run, within-subject models with optimal effect estimates.
* Cross-subject models using fixed / random effects variance ratios.
* Permutation testing for inference on cross-subject result.

See ``parallel_run.py`` for a rig to run these analyses in parallel, using the
IPython parallel machinery.

This script needs the pre-processed FIAC data.  See ``README.txt`` and
``fiac_util.py`` for details.

See ``examples/labs/need_data/first_level_fiac.py`` for an alternative
approach to some of these analyses.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Stdlib
import warnings
from copy import copy
from os.path import join as pjoin
from tempfile import NamedTemporaryFile

# Local
import fiac_util as futil

# Third party
import numpy as np

from nipy.algorithms.statistics import onesample

# From NIPY
from nipy.algorithms.statistics.api import ARModel, OLSModel, isestimable, make_recarray
from nipy.core import api
from nipy.core.api import Image
from nipy.core.image.image import rollimg
from nipy.io.api import load_image, save_image
from nipy.modalities.fmri import design, hrf
from nipy.modalities.fmri.fmristat import hrf as delay

# reload(futil)  # uncomment while developing interactively
# (needs ``from importlib import reload`` on Python 3)

#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

SUBJECTS = tuple(list(range(5)) + list(range(6, 16)))  # No data for subject 5
RUNS = tuple(range(1, 5))
DESIGNS = ('event', 'block')
CONTRASTS = ('speaker_0', 'speaker_1', 'sentence_0', 'sentence_1',
             'sentence:speaker_0', 'sentence:speaker_1')

GROUP_MASK = futil.load_image_fiac('group', 'mask.nii')
TINY_MASK = np.zeros(GROUP_MASK.shape, np.bool_)
TINY_MASK[30:32,40:42,30:32] = 1

#-----------------------------------------------------------------------------
# Public functions
#-----------------------------------------------------------------------------

# For group analysis

def run_model(subj, run):
    """ Single subject fitting of FIAC model """
    #----------------------------------------------------------------------
    # Set initial parameters of the FIAC dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 191
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'.  It is used in the
    # function design.event_design to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data
    # relevant to this subject/run
    path_info = futil.path_info_run(subj,run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in
    # futil that reformat the original FIAC-supplied format into something
    # where the factorial structure of the design is more explicit.  This has
    # already been run once, and get_experiment_initial() will simply load
    # the newly-formatted design description files (.csv) into record arrays.
    experiment, initial = futil.get_experiment_initial(path_info)

    # Create design matrices for the "initial" and "experiment" factors,
    # saving the default contrasts.

    # The function event_design will create design matrices, which in the
    # case of "experiment" will have num_columns = (# levels of speaker) *
    # (# levels of sentence) * len(delay.spectral) = 2 * 2 * 2 = 8.  For
    # "initial", there will be (# levels of initial) * len([hrf.glover]) =
    # 1 * 1 = 1.

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that are
    # described in:
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    #    Evans, A.C. (2002). \'Estimating the delay of the response in fMRI
    #    data.\' NeuroImage, 16:593-606.

    # The contrast definitions in ``cons_exper`` are a dictionary with keys
    # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1', 'sentence_0',
    # 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1'] representing
    # the four default contrasts: constant, main effects + interactions, each
    # convolved with 2 HRFs in delay.spectral.  For example,
    # sentence:speaker_0 is the interaction of sentence and speaker convolved
    # with the first (=0) of the two HRF basis functions, and
    # sentence:speaker_1 is the interaction convolved with the second (=1) of
    # the basis functions.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.event_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral)

    # The contrasts for 'initial' are ignored as they are "uninteresting" and
    # are included in the model as confounds.
    X_initial, _ = design.event_design(initial, volume_times_rec,
                                       hrfs=[hrf.glover])

    # In addition to factors, there is typically a "drift" term.  In this
    # case, the drift is a natural cubic spline with a knot at the midpoint
    # (volume_times.mean())
    vt = volume_times # shorthand
    drift = np.array( [vt**i for i in range(4)] +
                      [(vt-vt.mean())**3 * (np.greater(vt, vt.mean()))] )
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol,5) so that it will
    # have the same number of rows as X_initial and X_exper.
    drift = drift.T

    # There are helper functions to create these drifts:
    # design.fourier_basis, design.natural_spline.
    # Therefore, the above is equivalent (except for the normalization by max
    # for numerical stability) to
    #
    # >>> drift = design.natural_spline(t, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts, which have the same
    # keys as cons_exper, but whose values are arrays with 15 columns, with
    # the non-zero entries matching the columns of X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (X_initial, {}),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts.
    # Iterate over a copy of the keys, because we delete entries as we go.
    for k in list(cons):
        if not isestimable(cons[k], X):
            del cons[k]
            warnings.warn(f"contrast {k} not estimable for this run")

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the
    # two coefficients, one for each HRF in delay.spectral

    cons['speaker'] = np.vstack([cons['speaker_0'], cons['speaker_1']])
    cons['sentence'] = np.vstack([cons['sentence_0'], cons['sentence_1']])
    cons['sentence:speaker'] = np.vstack([cons['sentence:speaker_0'],
                                          cons['sentence:speaker_1']])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to have
    # time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri_im = futil.get_fmri(path_info) # an Image
    fmri_im = rollimg(fmri_im, 't')
    fmri = fmri_im.get_fdata() # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nx, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS (ordinary
    # least squares) fit, whose residuals are used to estimate an AR(1)
    # parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nx):
        d = np.array(fmri[:,s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to nearest one-hundredth and group voxels by their rounded
    # ar1 value, fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)
    ar1 *= 100
    ar1 = ar1.astype(np.int_) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}; tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix=f'{n}{v}.nii'),
                                    dtype=np.float64,
                                    shape=volshape, mode='w+')
        output[n] = tempdict

    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix=f'{n}F.nii'),
                              dtype=np.float64,
                              shape=volshape, mode='w+')

    # Loop over the unique values of ar1
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:,armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info,tcons,fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, f'{v}.nii'))
    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))


def fixed_effects(subj, design):
    """ Fixed effects (within subject) for FIAC model

    Finds run by run estimated model results, creates fixed effects results
    image per subject.

    Parameters
    ----------
    subj : int
        subject number 0..15 inclusive
    design : {'block', 'event'}
        design type
    """
    # First, find all the effect and standard deviation images
    # for the subject and this design type
    path_dict = futil.path_info_design(subj, design)
    rootdir = path_dict['rootdir']
    # The output directory
    fixdir = pjoin(rootdir, "fixed")
    # Fetch results images from run estimations
    results = futil.results_table(path_dict)
    # Get our hands on the relevant coordmap to save our results
    coordmap = futil.load_image_fiac("fiac_%02d" % subj,
                                     "wanatomical.nii").coordmap
    # Compute the "fixed" effects for each type of contrast
    for con in results:
        fixed_effect = 0
        fixed_var = 0
        for effect, sd in results[con]:
            effect = load_image(effect).get_fdata()
            sd = load_image(sd).get_fdata()
            var = sd ** 2

            # The optimal, in terms of minimum variance, combination of the
            # effects has weights 1 / var
            #
            # XXX regions with 0 variance are set to 0
            # XXX do we want this or np.nan?
            ivar = np.nan_to_num(1. / var)
            fixed_effect += effect * ivar
            fixed_var += ivar

        # Now, compute the fixed effects variance and t statistic
        fixed_sd = np.sqrt(fixed_var)
        isd = np.nan_to_num(1. / fixed_sd)
        fixed_t = fixed_effect * isd

        # Save the results
        odir = futil.ensure_dir(fixdir, con)
        for a, n in zip([fixed_effect, fixed_sd, fixed_t],
                        ['effect', 'sd', 't']):
            im = api.Image(a, copy(coordmap))
            save_image(im, pjoin(odir, f'{n}.nii'))


def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError(f'No subjects for {design}, {contrast}')

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_fdata())
        Ys.append(effect_img.get_fdata())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). \'A general statistical
    #    analysis for fMRI data\'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed
    # and saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00","wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd)
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, f"{n}.nii"))


def group_analysis_signs(design, contrast, mask, signs=None):
    """ Refit the EM model with a vector of signs.

    Used in the permutation tests.

    Returns the minimum and maximum of the T-statistic within mask

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        name of contrast to estimate
    mask : ``Image`` instance or array-like
        image containing mask, or array-like
    signs : ndarray, optional
        Defaults to np.ones. Should have shape (*,nsubj)
        where nsubj is the number of effects combined in the group analysis.

    Returns
    -------
    minT: np.ndarray, minima of T statistic within mask, one for each
        vector of signs
    maxT: np.ndarray, maxima of T statistic within mask, one for each
        vector of signs
    """
    if api.is_image(mask):
        maska = mask.get_fdata()
    else:
        maska = np.asarray(mask)
    maska = maska.astype(np.bool_)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_fdata()[maska])
        Ys.append(effect_img.get_fdata()[maska])
    sd = np.array(sds)
    Y = np.array(Ys)

    if signs is None:
        signs = np.ones((1, Y.shape[0]))

    maxT = np.empty(signs.shape[0])
    minT = np.empty(signs.shape[0])

    for i, sign in enumerate(signs):
        signY = sign[:,np.newaxis] * Y
        varest = onesample.estimate_varatio(signY, sd)
        random_var = varest['random']

        adjusted_var = sd**2 + random_var
        adjusted_sd = np.sqrt(adjusted_var)

        # Estimate the mean of the sign-flipped effects
        results = onesample.estimate_mean(signY, adjusted_sd)
        T = results['t']
        minT[i], maxT[i] = np.nanmin(T), np.nanmax(T)
    return minT, maxT


def permutation_test(design, contrast, mask=GROUP_MASK, nsample=1000):
    """ Perform a permutation (sign) test for a given design type and
    contrast. It is a Monte Carlo test because we only sample nsample
    possible sign arrays.
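    Each sample draws a random vector of +1 / -1 signs, one per subject,
    flips the subject effects by those signs, refits the group model via
    ``group_analysis_signs``, and records the minimum and maximum T within
    the mask; these extremes form the Monte Carlo null distribution.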
Parameters ---------- design: str one of ['block', 'event'] contrast : str name of contrast to estimate mask : ``Image`` instance or array-like, optional image containing mask, or array-like nsample: int, optional number of permutations Returns ------- min_vals: np.ndarray max_vals: np.ndarray """ subj_con_dirs = futil.subj_des_con_dirs(design, contrast) nsubj = len(subj_con_dirs) if nsubj == 0: raise ValueError(f'No subjects have {design}, {contrast}') signs = 2*np.greater(np.random.sample(size=(nsample, nsubj)), 0.5) - 1 min_vals, max_vals = group_analysis_signs(design, contrast, mask, signs) return min_vals, max_vals def run_run_models(subject_nos=SUBJECTS, run_nos = RUNS): """ Simple serial run of all the within-run models """ for subj in subject_nos: for run in run_nos: try: run_model(subj, run) except OSError: print('Skipping subject %d, run %d' % (subj, run)) def run_fixed_models(subject_nos=SUBJECTS, designs=DESIGNS): """ Simple serial run of all the within-subject models """ for subj in subject_nos: for design in designs: try: fixed_effects(subj, design) except OSError: print('Skipping subject %d, design %s' % (subj, design)) def run_group_models(designs=DESIGNS, contrasts=CONTRASTS): """ Simple serial run of all the across-subject models """ for design in designs: for contrast in contrasts: group_analysis(design, contrast) if __name__ == '__main__': pass # Sanity check while debugging #permutation_test('block','sentence_0',mask=TINY_MASK,nsample=3) nipy-0.6.1/examples/fiac/fiac_hashes.txt000066400000000000000000000326271470056100100202170ustar00rootroot00000000000000MD5 hashes for FIAC preprocessed data ------------------------------------- This also gives the directory structure that ``fiac_example.py`` needs. fiac_data/fiac_00/block/experiment_01.csv 6a9ebdefe6a72657f7c68533c124df39 fiac_data/fiac_00/block/experiment_02.csv 49731ababbc87465b6481ddf8a2d8664 fiac_data/fiac_00/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_00/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_00/block/swafunctional_01.nii cdbed16524732ec22d5888a1be82d1c4 fiac_data/fiac_00/block/swafunctional_02.nii e1235803f692d5111e4d79fa16fd1ed5 fiac_data/fiac_00/event/experiment_03.csv 33e347c5ed13484df9ab9bedf855e8e2 fiac_data/fiac_00/event/experiment_04.csv ca84e9f6394d767aff68346be3265ab7 fiac_data/fiac_00/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_00/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_00/event/swafunctional_03.nii 4a00580a881f3aa4260f3ceaac188c21 fiac_data/fiac_00/event/swafunctional_04.nii 4b3e32342ca90daffe14017644ba992a fiac_data/fiac_01/block/experiment_01.csv 6a9ebdefe6a72657f7c68533c124df39 fiac_data/fiac_01/block/experiment_02.csv 49731ababbc87465b6481ddf8a2d8664 fiac_data/fiac_01/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_01/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_01/block/swafunctional_01.nii 38afd035e6e60689c270fdaa8d456bf9 fiac_data/fiac_01/block/swafunctional_02.nii ce9c068913a89c5fee4bfa26f8417484 fiac_data/fiac_01/event/experiment_03.csv 33e347c5ed13484df9ab9bedf855e8e2 fiac_data/fiac_01/event/experiment_04.csv ca84e9f6394d767aff68346be3265ab7 fiac_data/fiac_01/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_01/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_01/event/swafunctional_03.nii 65430fd882511cbfdc461c8654f43c08 fiac_data/fiac_01/event/swafunctional_04.nii 8c419ff788218d8dc8475b4d17fa5614 
fiac_data/fiac_02/block/experiment_01.csv 44e14d55f06b5aa6274e9b8e14e7f34d fiac_data/fiac_02/block/experiment_02.csv d9715937067d98627faf4eed79bf4df6 fiac_data/fiac_02/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_02/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_02/block/swafunctional_01.nii 00def42c41f3d1b6bf7956f30d3ca78e fiac_data/fiac_02/block/swafunctional_02.nii 347ef8d217f6ef7eeaeb29e92ca3634a fiac_data/fiac_02/event/experiment_03.csv 7b97248a3e3ff3a63fc7b2ea54541ab0 fiac_data/fiac_02/event/experiment_04.csv 8e52d16c9ef91d3607945338f38dbdd8 fiac_data/fiac_02/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_02/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_02/event/swafunctional_03.nii 1d0739396a855ef90ff89b5033f37fad fiac_data/fiac_02/event/swafunctional_04.nii a419c28db72197945fc632c09bc1868a fiac_data/fiac_03/block/experiment_03.csv b173ed72bcd82067f69964126c086335 fiac_data/fiac_03/block/experiment_04.csv 7637ac98ec67c5185de87d9f082f7bc5 fiac_data/fiac_03/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_03/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_03/block/swafunctional_03.nii 635aeebbf5fe60959b680912ea330cbd fiac_data/fiac_03/block/swafunctional_04.nii 904693e5b1d87ee02b612c28c2d0e4e8 fiac_data/fiac_03/event/experiment_01.csv f978b60749ecacb69cc4591123a87be5 fiac_data/fiac_03/event/experiment_02.csv 8eab700098c629378213c396822fc002 fiac_data/fiac_03/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_03/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_03/event/swafunctional_01.nii 480dba56a8e282897d8476e14e5b1c6b fiac_data/fiac_03/event/swafunctional_02.nii 6b984334dd5ddb246c8edcbece436e2c fiac_data/fiac_04/block/experiment_02.csv 5a25f02cb9b2f50d2a0b4b427faea2f6 fiac_data/fiac_04/block/experiment_03.csv 862dc60967c120915d0126df5a961b2d fiac_data/fiac_04/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_04/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_04/block/swafunctional_02.nii e0d62ac3f71f651bfa9e4f3484967273 fiac_data/fiac_04/block/swafunctional_03.nii 5219d9a597b78a69fb3b9d999e028b08 fiac_data/fiac_04/event/experiment_01.csv 8dd9bfa3644c30f42f3a3678e6ec5102 fiac_data/fiac_04/event/experiment_04.csv 05fe3c5bec4ebe5247ca23cc0b153012 fiac_data/fiac_04/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_04/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_04/event/swafunctional_01.nii 0c46c07f14fbb25fb61014c7b1472c84 fiac_data/fiac_04/event/swafunctional_04.nii d64197691aec027c7b9d920e28aecce1 fiac_data/fiac_05/block/experiment_02.csv b165c1276fe094ade2cf47db3df6c036 fiac_data/fiac_05/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_05/event/experiment_01.csv 770d517d8022cb5ed39cfb3b38371308 fiac_data/fiac_05/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/block/experiment_02.csv 907008500bcbf8204790e5138fab8bd7 fiac_data/fiac_06/block/experiment_03.csv 1c496dd1e8892384a701cbfe44492901 fiac_data/fiac_06/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_06/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_06/block/swafunctional_02.nii 803f2e754bcda3ff3170f1f39c44ffac fiac_data/fiac_06/block/swafunctional_03.nii f9eebfa39fdac1b16ebcc0dd085c1562 fiac_data/fiac_06/event/experiment_01.csv 129786bb621f0214f56993179f3ed40e 
fiac_data/fiac_06/event/experiment_04.csv 8f19a310d5abb8c876a4ca1f2b20cefd fiac_data/fiac_06/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/event/swafunctional_01.nii 3eddd593ed9e97cdc6ee94f4337fcf09 fiac_data/fiac_06/event/swafunctional_04.nii 0255761003a9b9a9c0d1d22d9c2b30c8 fiac_data/fiac_07/block/experiment_02.csv b2054da6001d926507b3c630ba7914db fiac_data/fiac_07/block/experiment_03.csv e3dea6bbcbe67f11710837fcfbb4b47e fiac_data/fiac_07/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_07/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_07/block/swafunctional_02.nii a711e931b7ba336cd513d2219480eefc fiac_data/fiac_07/block/swafunctional_03.nii 93861d5c563f68c6c80f1aa8f30af994 fiac_data/fiac_07/event/experiment_04.csv daf6114730ec53169f181f680c4820e3 fiac_data/fiac_07/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_07/event/swafunctional_04.nii 18b881b94ea8a7970e056d8b1338b840 fiac_data/fiac_08/block/experiment_01.csv 9bd851a905f35ae11af7881659953e34 fiac_data/fiac_08/block/experiment_03.csv d0353b3d229f07e3055893addd4f1c3f fiac_data/fiac_08/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_08/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_08/block/swafunctional_01.nii ac8a7f3c49255cdbfff13caa79913917 fiac_data/fiac_08/block/swafunctional_03.nii b4bf244de40dd1a5aabd4acbc38afbb0 fiac_data/fiac_08/event/experiment_02.csv b94c07427f4b265d6ffa073448444aea fiac_data/fiac_08/event/experiment_04.csv b3f1005432a6cb58a78c8694d9232a18 fiac_data/fiac_08/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_08/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_08/event/swafunctional_02.nii 19b7fb6ada363d5d11e55ebe0c75203c fiac_data/fiac_08/event/swafunctional_04.nii bb1368611872f012a27c0b1ffe72d5e2 fiac_data/fiac_09/block/experiment_01.csv 3d49cd07b5ffa8d1692526572c396114 fiac_data/fiac_09/block/experiment_03.csv d7ef56ef814cb3f10dc57c00a8514600 fiac_data/fiac_09/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_09/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_09/block/swafunctional_01.nii 269e31e30913a07262db0577450ae276 fiac_data/fiac_09/block/swafunctional_03.nii 24eb37510ffe2fcc11bac201908d888b fiac_data/fiac_09/event/experiment_02.csv dcf026465df7bf5e02f6e91e430ce3b3 fiac_data/fiac_09/event/experiment_04.csv 6dcb72473920410c93c6e5fb584a3b0c fiac_data/fiac_09/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_09/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_09/event/swafunctional_02.nii 3de018c0c9aac8d8f4831bde6d14d2d6 fiac_data/fiac_09/event/swafunctional_04.nii f3a54581cd9ece5708b03c18aef0dcda fiac_data/fiac_10/block/experiment_01.csv 79d366f5ad8e2baa17571ba90a2d29c8 fiac_data/fiac_10/block/experiment_03.csv 159ffd9e1afa85e5d7f5818913f11255 fiac_data/fiac_10/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_10/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_10/block/swafunctional_01.nii ba76304d62f4458ffefd2bf961866517 fiac_data/fiac_10/block/swafunctional_03.nii 536eb2cd0923ef5f166f708efecd3d22 fiac_data/fiac_10/event/experiment_02.csv 2bd807a649539085005f3441a5d3266f fiac_data/fiac_10/event/experiment_04.csv 4259afa19c1cc1dc9cedaf2bbf6ea39d fiac_data/fiac_10/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d 
fiac_data/fiac_10/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_10/event/swafunctional_02.nii b35550de824147f116df000e5531b64c fiac_data/fiac_10/event/swafunctional_04.nii a4459fdd9540aebf9e4c42fce061d2ed fiac_data/fiac_11/block/experiment_01.csv 3a18ea4be3e6cd8e8c211943a8bc1738 fiac_data/fiac_11/block/experiment_04.csv e6934cf684f72812c916b67aa3b1f806 fiac_data/fiac_11/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_11/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_11/block/swafunctional_01.nii 4fe3f14e75486ee6598142e15d5e8d31 fiac_data/fiac_11/block/swafunctional_04.nii 4a6febb5e860f27e4e73e0ae050d729b fiac_data/fiac_11/event/experiment_02.csv b3f1005432a6cb58a78c8694d9232a18 fiac_data/fiac_11/event/experiment_03.csv 269c94a12854a833e380116a51f4a6d8 fiac_data/fiac_11/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_11/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_11/event/swafunctional_02.nii 6e0ee7061065231996cbdbe5a0ae194c fiac_data/fiac_11/event/swafunctional_03.nii c28e4ac1a2307acba3b96b0764fd219e fiac_data/fiac_12/block/experiment_01.csv 9e408441dc25d7016d5930608e1dd7a4 fiac_data/fiac_12/block/experiment_04.csv 81f7ebad3ddd40521908586f1775273e fiac_data/fiac_12/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_12/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_12/block/swafunctional_01.nii 872f6e7d6f827efcb29837a0099a0d5c fiac_data/fiac_12/block/swafunctional_04.nii c6e0397579c22fe8ff9b48dafa48b03f fiac_data/fiac_12/event/experiment_02.csv 7423c9d1f6c6b91c54945a135ae3b427 fiac_data/fiac_12/event/experiment_03.csv 7694aae4d34ecd33032b85e285059ab7 fiac_data/fiac_12/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_12/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_12/event/swafunctional_02.nii ccd26a8126bfaa545a521a96377097b0 fiac_data/fiac_12/event/swafunctional_03.nii 6d9a287ad26896eb5b6196b1235814bf fiac_data/fiac_13/block/experiment_01.csv ca46543e0ec61bdfef275d4e140763c8 fiac_data/fiac_13/block/experiment_04.csv d15d5c3cc0eacd4c5117b4640675b001 fiac_data/fiac_13/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_13/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_13/block/swafunctional_01.nii 0cb8ff5a4dbbf03a26eae084ff99d525 fiac_data/fiac_13/block/swafunctional_04.nii b64727ba5608d064a7c111114ff6f5f6 fiac_data/fiac_13/event/experiment_02.csv 915f57a8e6c6e329c65ed30c92ef0f71 fiac_data/fiac_13/event/experiment_03.csv 8c97635901a6552d51486d3e9a08e02f fiac_data/fiac_13/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_13/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_13/event/swafunctional_02.nii 1b18eea773a55e1b54dd2debd736e1a2 fiac_data/fiac_13/event/swafunctional_03.nii 8a2e39596b49f8ae57f936e1f91819f6 fiac_data/fiac_14/block/experiment_02.csv c1f9f84111c88cb3ce66885fa8947e7e fiac_data/fiac_14/block/experiment_04.csv 4ce67a5d04078da9ec20aa10a171147b fiac_data/fiac_14/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_14/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_14/block/swafunctional_02.nii 6774d4a6f933899a44ca3ea4100257a6 fiac_data/fiac_14/block/swafunctional_04.nii a38525d9ae5763a6beac7fb42659d09b fiac_data/fiac_14/event/experiment_01.csv 737b4a4e8b2f3bbc6d4dcddca2063311 fiac_data/fiac_14/event/experiment_03.csv f46cae55a5c6447ba7cdf025ad31afd4 
fiac_data/fiac_14/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_14/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_14/event/swafunctional_01.nii 65cc11864f9d51f723f78c529459186f fiac_data/fiac_14/event/swafunctional_03.nii dac38972621b87d2ceeb647ab104a5bc fiac_data/fiac_15/block/experiment_02.csv a1d03527ce83e8f1d91fee407e8866e3 fiac_data/fiac_15/block/experiment_04.csv ee55950cc357518ce39a5f6005251672 fiac_data/fiac_15/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_15/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_15/block/swafunctional_02.nii c078463ffaf91be2b2015ff674364eef fiac_data/fiac_15/block/swafunctional_04.nii 602230469a6b23e0db4881977407faa6 fiac_data/fiac_15/event/experiment_01.csv 4b8dafd3f69b5ad2c791dfbb98f6b622 fiac_data/fiac_15/event/experiment_03.csv a4807223c8cc68e5c39b995cda4f2df1 fiac_data/fiac_15/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_15/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_15/event/swafunctional_01.nii ebf99885709a3d7da35127640b92a467 fiac_data/fiac_15/event/swafunctional_03.nii 19b22372d55ba9849eee46e7e17ffcd2 nipy-0.6.1/examples/fiac/fiac_util.py000066400000000000000000000321161470056100100175230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Support utilities for FIAC example, mostly path management. The purpose of separating these is to keep the main example code as readable as possible and focused on the experimental modeling and analysis, rather than on local file management issues. Requires matplotlib """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Stdlib import csv import os from io import StringIO # Python 3 from os import listdir, makedirs from os.path import abspath, exists, isdir, splitext from os.path import join as pjoin # Third party import numpy as np import pandas as pd # From NIPY from nipy.io.api import load_image def csv2rec(fname): return pd.read_csv(fname).to_records() def rec2csv(recarr, fname): pd.DataFrame.from_records(recarr).to_csv(fname, index=None) #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- # We assume that there is a directory holding the data and it's local to this # code. Users can either keep a copy here or a symlink to the real location on # disk of the data. DATADIR = 'fiac_data' # Sanity check if not os.path.isdir(DATADIR): e=f"The data directory {DATADIR} must exist and contain the FIAC data." raise OSError(e) #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- # Path management utilities def load_image_fiac(*path): """Return a NIPY image from a set of path components. 
""" return load_image(pjoin(DATADIR, *path)) def subj_des_con_dirs(design, contrast, nsub=16): """Return a list of subject directories with this `design` and `contrast` Parameters ---------- design : {'event', 'block'} contrast : str nsub : int, optional total number of subjects Returns ------- con_dirs : list list of directories matching `design` and `contrast` """ rootdir = DATADIR con_dirs = [] for s in range(nsub): f = pjoin(rootdir, "fiac_%02d" % s, design, "fixed", contrast) if isdir(f): con_dirs.append(f) return con_dirs def path_info_run(subj, run): """Construct path information dict for current subject/run. Parameters ---------- subj : int subject number (0..15 inclusive) run : int run number (1..4 inclusive). Returns ------- path_dict : dict a dict with all the necessary path-related keys, including 'rootdir', and 'design', where 'design' can have values 'event' or 'block' depending on which type of run this was for subject no `subj` and run no `run` """ path_dict = {'subj': subj, 'run': run} if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block", "initial_%(run)02d.csv") % path_dict): path_dict['design'] = 'block' else: path_dict['design'] = 'event' rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict path_dict['rootdir'] = rootdir return path_dict def path_info_design(subj, design): """Construct path information dict for subject and design. Parameters ---------- subj : int subject number (0..15 inclusive) design : {'event', 'block'} type of design Returns ------- path_dict : dict having keys 'rootdir', 'subj', 'design' """ path_dict = {'subj': subj, 'design': design} rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict path_dict['rootdir'] = rootdir return path_dict def results_table(path_dict): """ Return precalculated results images for subject info in `path_dict` Parameters ---------- path_dict : dict containing key 'rootdir' Returns ------- rtab : dict dict with keys given by run directories for this subject, values being a list with filenames of effect and sd images. """ # Which runs correspond to this design type? rootdir = path_dict['rootdir'] runs = filter(lambda f: isdir(pjoin(rootdir, f)), ['results_%02d' % i for i in range(1,5)] ) # Find out which contrasts have t-statistics, # storing the filenames for reading below results = {} for rundir in runs: rundir = pjoin(rootdir, rundir) for condir in listdir(rundir): for stat in ['sd', 'effect']: fname_effect = abspath(pjoin(rundir, condir, 'effect.nii')) fname_sd = abspath(pjoin(rundir, condir, 'sd.nii')) if exists(fname_effect) and exists(fname_sd): results.setdefault(condir, []).append([fname_effect, fname_sd]) return results def get_experiment_initial(path_dict): """Get the record arrays for the experimental/initial designs. Parameters ---------- path_dict : dict containing key 'rootdir', 'run', 'subj' Returns ------- experiment, initial : Two record arrays. """ # The following two lines read in the .csv files # and return recarrays, with fields # experiment: ['time', 'sentence', 'speaker'] # initial: ['time', 'initial'] rootdir = path_dict['rootdir'] if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict): e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict raise OSError(e) experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict) initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict) return experiment, initial def get_fmri(path_dict): """Get the images for a given subject/run. 
    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir', 'run'

    Returns
    -------
    fmri_im : NIPY image
        4D fMRI image for this subject / run
    """
    fmri_im = load_image(
        pjoin("%(rootdir)s/swafunctional_%(run)02d.nii") % path_dict)
    return fmri_im


def ensure_dir(*path):
    """Ensure a directory exists, making it if necessary.

    Returns the full path."""
    dirpath = pjoin(*path)
    if not isdir(dirpath):
        makedirs(dirpath)
    return dirpath


def output_dir(path_dict, tcons, fcons):
    """Get (and make if necessary) directory to write output into.

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir', 'run'
    tcons : sequence of str
        t contrasts
    fcons : sequence of str
        F contrasts
    """
    rootdir = path_dict['rootdir']
    odir = pjoin(rootdir, "results_%(run)02d" % path_dict)
    ensure_dir(odir)
    for n in tcons:
        ensure_dir(odir,n)
    for n in fcons:
        ensure_dir(odir,n)
    return odir


def test_sanity():
    """ Single subject fitting of FIAC model """
    import nipy.modalities.fmri.fmristat.hrf as fshrf
    from nipy.algorithms.statistics import formula
    from nipy.modalities.fmri import design, hrf
    from nipy.modalities.fmri.fmristat.tests import FIACdesigns
    from nipy.modalities.fmri.fmristat.tests.test_FIAC import matchcol

    # Based on file
    # subj3_evt_fonc1.txt
    # subj3_bloc_fonc3.txt
    for subj, run, design_type in [(3, 1, 'event'), (3, 3, 'block')]:
        nvol = 191
        TR = 2.5
        Tstart = 1.25

        volume_times = np.arange(nvol)*TR + Tstart
        volume_times_rec = formula.make_recarray(volume_times, 't')

        path_dict = {'subj':subj, 'run':run}
        if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block",
                        "initial_%(run)02d.csv") % path_dict):
            path_dict['design'] = 'block'
        else:
            path_dict['design'] = 'event'
        experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s",
                                   "experiment_%(run)02d.csv") % path_dict)
        initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s",
                                "initial_%(run)02d.csv") % path_dict)

        X_exper, cons_exper = design.event_design(experiment,
                                                  volume_times_rec,
                                                  hrfs=fshrf.spectral)
        X_initial, _ = design.event_design(initial, volume_times_rec,
                                           hrfs=[hrf.glover])
        X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}))

        # Get original fmristat design
        Xf = FIACdesigns.fmristat[design_type]

        # Check our new design can be closely matched to the original
        for i in range(X.shape[1]):
            # Columns can be very well correlated negatively or positively
            assert abs(matchcol(X[:,i], Xf)[1]) > 0.999


def rewrite_spec(subj, run, root = "/home/jtaylo/FIAC-HBM2009"):
    """ Take a FIAC specification file and get two specifications
    (experiment, begin).

    This creates two new .csv files, one for the experimental
    conditions, the other for the "initial" confounding trials that
    are to be modelled out.

    For the block design, the "initial" trials are the first
    trials of each block. For the event designs, the
    "initial" trials are made up of just the first trial.
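
    A sketch of the experiment file this writes -- the times are hypothetical,
    but the factor levels come from the ``eventdict`` mapping below::

        time,sentence,speaker
        10.0,SSt,SSp
        13.5,DSt,DSp

    The companion ``initial_*.csv`` file has columns ``time,initial``.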
""" if exists(pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_evt_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run}): designtype = 'evt' else: designtype = 'bloc' # Fix the format of the specification so it is # more in the form of a 2-way ANOVA eventdict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'} s = StringIO() w = csv.writer(s) w.writerow(['time', 'sentence', 'speaker']) specfile = pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} d = np.loadtxt(specfile) for row in d: w.writerow([row[0]] + eventdict[row[1]].split('_')) s.seek(0) d = csv2rec(s) # Now, take care of the 'begin' event # This is due to the FIAC design if designtype == 'evt': b = np.array([(d[0]['time'], 1)], np.dtype([('time', np.float64), ('initial', np.int_)])) d = d[1:] else: k = np.equal(np.arange(d.shape[0]) % 6, 0) b = np.array([(tt, 1) for tt in d[k]['time']], np.dtype([('time', np.float64), ('initial', np.int_)])) d = d[~k] designtype = {'bloc':'block', 'evt':'event'}[designtype] fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} rec2csv(d, fname) experiment = csv2rec(fname) fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} rec2csv(b, fname) initial = csv2rec(fname) return d, b def compare_results(subj, run, other_root, mask_fname): """ Find and compare calculated results images from a previous run This script checks that another directory containing results of this same analysis are similar in the sense of numpy ``allclose`` within a brain mask. Parameters ---------- subj : int subject number (0..4, 6..15) run : int run number (1..4) other_root : str path to previous run estimation mask_fname: path to a mask image defining area in which to compare differences """ # Get information for this subject and run path_dict = path_info_run(subj, run) # Get mask msk = load_image(mask_fname).get_fdata().copy().astype(bool) # Get results directories for this run rootdir = path_dict['rootdir'] res_dir = pjoin(rootdir, 'results_%02d' % run) if not isdir(res_dir): return for dirpath, dirnames, filenames in os.walk(res_dir): for fname in filenames: froot, ext = splitext(fname) if froot in ('effect', 'sd', 'F', 't'): this_fname = pjoin(dirpath, fname) other_fname = this_fname.replace(DATADIR, other_root) if not exists(other_fname): print(this_fname, 'present but ', other_fname, 'missing') continue this_arr = load_image(this_fname).get_fdata() other_arr = load_image(other_fname).get_fdata() ok = np.allclose(this_arr[msk], other_arr[msk]) if not ok and froot in ('effect', 'sd', 't'): # Maybe a sign flip ok = np.allclose(this_arr[msk], -other_arr[msk]) if not ok: print('Difference between', this_fname, other_fname) def compare_all(other_root, mask_fname): """ Run results comparison for all subjects and runs """ for subj in range(5) + range(6, 16): for run in range(1, 5): compare_results(subj, run, other_root, mask_fname) nipy-0.6.1/examples/fiac/parallel_run.py000066400000000000000000000070071470056100100202450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to run the main analyses in parallel, using the IPython machinery. See ``fiac_example.py``. 
""" #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import os import numpy as np from IPython import parallel #----------------------------------------------------------------------------- # Utility functions #----------------------------------------------------------------------------- _client = None def setup_client(): """Get a Client and initialize it. This assumes that all nodes see a shared filesystem. """ global _client if _client is None: _client = parallel.Client() mydir = os.path.split(os.path.abspath(__file__))[0] def cd(path): import os os.chdir(path) _client[:].apply_sync(cd, mydir) return _client def getruns(): for i in range(16): for j in range(1,5): yield i, j def getvals(): for con in ['sentence:speaker_0', 'sentence_1', 'sentence_0', 'sentence:speaker_1', 'speaker_1', 'speaker_0', 'constant_1', 'constant_0']: for design in ['block', 'event']: yield design, con #----------------------------------------------------------------------------- # Main analysis functions #----------------------------------------------------------------------------- def fitruns(): """Run the basic model fit.""" rc = setup_client() view = rc.load_balanced_view() i_s, j_s = zip(*getruns()) def _fit(subj, run): import fiac_example try: return fiac_example.run_model(subj, run) except OSError: pass return view.map(_fit, i_s, j_s) def fitfixed(): """Run the fixed effects analysis for all subjects.""" rc = setup_client() view = rc.load_balanced_view() subjects = range(16) def _fit(subject): import fiac_example try: fiac_example.fixed_effects(subject, "block") except OSError: pass try: fiac_example.fixed_effects(subject, "event") except OSError: pass return view.map(_fit, subjects) def fitgroup(): """Run the group analysis""" rc = setup_client() view = rc.load_balanced_view() d_s, c_s = zip(*getvals()) def _fit(d, c): import fiac_example return fiac_example.group_analysis(d, c) return view.map(_fit, d_s, c_s) def run_permute_test(design, contrast, nsample=1000): rc = setup_client() dview = rc[:] nnod = len(dview) # Samples per node. Round up ns_nod = np.ceil(nsample / float(nnod)) def _run_test(n, des, con): import fiac_example from fiac_example import GROUP_MASK min_vals, max_vals = fiac_example.permutation_test(des, con, GROUP_MASK, n) return min_vals, max_vals ar = dview.apply_async(_run_test, ns_nod, design, contrast) min_vals, max_vals = zip(*list(ar)) return np.concatenate(min_vals), np.concatenate(max_vals) #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': pass nipy-0.6.1/examples/fiac/view_contrasts_3d.py000077500000000000000000000047301470056100100212300ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """A quick and dirty example of using Mayavi to overlay anatomy and activation. 
""" #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import numpy as np try: from mayavi import mlab except ImportError: try: from enthought.mayavi import mlab except ImportError: raise RuntimeError('Need mayavi for this module') from fiac_util import load_image_fiac #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- MASK = load_image_fiac('group', 'mask.nii') AVGANAT = load_image_fiac('group', 'avganat.nii') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def view_thresholdedT(design, contrast, threshold, inequality=np.greater): """ A mayavi isosurface view of thresholded t-statistics Parameters ---------- design : {'block', 'event'} contrast : str threshold : float inequality : {np.greater, np.less}, optional """ maska = np.asarray(MASK) tmap = np.array(load_image_fiac('group', design, contrast, 't.nii')) test = inequality(tmap, threshold) tval = np.zeros(tmap.shape) tval[test] = tmap[test] # XXX make the array axes agree with mayavi2 avganata = np.array(AVGANAT) avganat_iso = mlab.contour3d(avganata * maska, opacity=0.3, contours=[3600], color=(0.8,0.8,0.8)) avganat_iso.actor.property.backface_culling = True avganat_iso.actor.property.ambient = 0.3 tval_iso = mlab.contour3d(tval * MASK, color=(0.8,0.3,0.3), contours=[threshold]) return avganat_iso, tval_iso #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': # A simple example use case design = 'block' contrast = 'sentence_0' threshold = 0.3 print('Starting thresholded view with:') print('Design=', design, 'contrast=', contrast, 'threshold=', threshold) view_thresholdedT(design, contrast, threshold) nipy-0.6.1/examples/formula/000077500000000000000000000000001470056100100157525ustar00rootroot00000000000000nipy-0.6.1/examples/formula/fir.py000077500000000000000000000036241470056100100171140ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of FIR model using formula framework Shows how to use B splines as basis functions for the FIR instead of simple boxcars. Requires matplotlib """ import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from sympy.utilities.lambdify import implemented_function from nipy.algorithms.statistics.api import Formula from nipy.modalities.fmri import utils def linBspline(knots): """ Create linear B spline that is zero outside [knots[0], knots[-1]] (knots is assumed to be sorted). 
""" fns = [] knots = np.array(knots) for i in range(knots.shape[0]-2): name = f'bs_{i}' k1, k2, k3 = knots[i:i+3] d1 = k2-k1 def anon(x,k1=k1,k2=k2,k3=k3): return ((x-k1) / d1 * np.greater(x, k1) * np.less_equal(x, k2) + (k3-x) / d1 * np.greater(x, k2) * np.less(x, k3)) fns.append(implemented_function(name, anon)) return fns # The splines are functions of t (time) bsp_fns = linBspline(np.arange(0,10,2)) # We're going to evaluate at these specific values of time tt = np.linspace(0,50,101) tvals= tt.view(np.dtype([('t', np.float64)])) # Some inter-stimulus intervals isis = np.random.uniform(low=0, high=3, size=(4,)) + 10. # Made into event onset times e = np.cumsum(isis) # Make event onsets into functions of time convolved with the spline functions. event_funcs = [utils.events(e, f=fn) for fn in bsp_fns] # Put into a formula. f = Formula(event_funcs) # The design matrix X = f.design(tvals, return_float=True) # Show the design matrix as line plots plt.plot(X[:,0]) plt.plot(X[:,1]) plt.plot(X[:,2]) plt.xlabel('time (s)') plt.title('B spline used as bases for an FIR response model') plt.show() nipy-0.6.1/examples/formula/multi_session_contrast.py000077500000000000000000000107671470056100100231540ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of more than one run in the same model """ import numpy as np from nipy.algorithms.statistics.api import Factor, Formula, Term from nipy.modalities.fmri import hrf, utils # HRF models we will use for each run. Just to show it can be done, use a # different HRF model for each run h1 = hrf.glover h2 = hrf.afni # Symbol for time in general. The 'events' function below will return models in # terms of 't', but we'll want models in terms of 't1' and 't2'. We need 't' # here so we can substitute. t = Term('t') # run 1 t1 = Term('t1') # Time within run 1 c11 = utils.events([3, 7, 10], f=h1) # Condition 1, run 1 # The events utility returns a formula in terms of 't' - general time c11 = c11.subs(t, t1) # Now make it in terms of time in run 1 # Same for conditions 2 and 3 c21 = utils.events([1, 3, 9], f=h1); c21 = c21.subs(t, t1) c31 = utils.events([2, 4, 8], f=h1); c31 = c31.subs(t, t1) # Add also a Fourier basis set for drift with frequencies 0.3, 0.5, 0.7 d1 = utils.fourier_basis([0.3, 0.5, 0.7]); d1 = d1.subs(t, t1) # Here's our formula for run 1 signal terms of time in run 1 (t1) f1 = Formula([c11,c21,c31]) + d1 # run 2 t2 = Term('t2') # Time within run 2 # Conditions 1 through 3 in run 2 c12 = utils.events([3.3, 7, 10], f=h2); c12 = c12.subs(t, t2) c22 = utils.events([1, 3.2, 9], f=h2); c22 = c22.subs(t, t2) c32 = utils.events([2, 4.2, 8], f=h2); c32 = c32.subs(t, t2) d2 = utils.fourier_basis([0.3, 0.5, 0.7]); d2 = d2.subs(t, t2) # Formula for run 2 signal in terms of time in run 2 (t2) f2 = Formula([c12, c22, c32]) + d2 # Factor giving constant for run. The [1, 2] means that there are two levels to # this factor, and that when we get to pass in values for this factor, # instantiating an actual design matrix (see below), a value of 1 means level # 1 and a value of 2 means level 2. run_factor = Factor('run', [1, 2]) run_1_coder = run_factor.get_term(1) # Term coding for level 1 run_2_coder = run_factor.get_term(2) # Term coding for level 2 # The multi run formula will combine the indicator (dummy value) terms from the # run factor with the formulae for the runs (which are functions of (run1, run2) # time. 
The run_factor terms are step functions that are zero when not in the # run, 1 when in the run. f = Formula([run_1_coder]) * f1 + Formula([run_2_coder]) * f2 + run_factor # Now, we evaluate the formula. So far we've been entirely symbolic. Now we # start to think about the values at which we want to evaluate our symbolic # formula. # We'll use these values for time within run 1. The times are in seconds from # the beginning of run 1. In our case run 1 was 20 seconds long. 101 below # gives 101 values from 0 to 20 including the endpoints, giving a dt of 0.2. tval1 = np.linspace(0, 20, 101) # run 2 lasts 10 seconds. These are the times in terms of the start of run 2. tval2 = np.linspace(0, 10, 51) # We pad out the tval1 / tval2 time vectors with zeros corresponding to the # TRs in run 2 / run 1. ttval1 = np.hstack([tval1, np.zeros(tval2.shape)]) ttval2 = np.hstack([np.zeros(tval1.shape), tval2]) # The arrays above now have 152=101+51 rows... # Vector of run numbers for each time point (with values 1 or 2) run_no = np.array([1]*tval1.shape[0] + [2]*tval2.shape[0]) # Create the recarray that will be used to create the design matrix. The # recarray gives the actual values for the symbolic terms in the formulae. In # our case the terms are t1, t2, and the (indicator coding) terms from the run # factor. rec = np.array(list(zip(ttval1, ttval2, run_no)), np.dtype([('t1', np.float64), ('t2', np.float64), ('run', np.int_)])) # The contrast we care about contrast = Formula([run_1_coder * c11 - run_2_coder * c12]) # # Create the design matrix X = f.design(rec, return_float=True) # Show ourselves the design space covered by the contrast, and the corresponding # contrast matrix preC = contrast.design(rec, return_float=True) # C is the matrix such that preC = X.dot(C.T) C = np.dot(np.linalg.pinv(X), preC) print(C) # We can also get this by passing the contrast into the design creation. 
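# (The design call below also returns the contrast matrix, as entry 'C' of
# the dictionary c; a quick sketch of a check is that np.dot(X, c['C'].T)
# should then be close to preC.)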
X, c = f.design(rec, return_float=True, contrasts={'C': contrast}) assert np.allclose(C, c['C']) # Show the names of the non-trivial elements of the contrast nonzero = np.nonzero(np.fabs(C) >= 1e-5)[0] print((f.dtype.names[nonzero[0]], f.dtype.names[nonzero[1]])) print(((run_1_coder * c11), (run_2_coder * c12))) nipy-0.6.1/examples/formula/parametric_design.py000077500000000000000000000036121470056100100220110ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ In this example, we create a regression model for an event-related design in which the response to an event at time T[i] is modeled as depending on the amount of time since the last stimulus T[i-1] """ import numpy as np import sympy from nipy.algorithms.statistics.api import Formula, make_recarray from nipy.modalities.fmri import hrf, utils # Inter-stimulus intervals (time between events) dt = np.random.uniform(low=0, high=2.5, size=(50,)) # Onset times from the ISIs t = np.cumsum(dt) # We're going to model the amplitudes ('a') by dt (the time between events) a = sympy.Symbol('a') linear = utils.define('linear', utils.events(t, dt, f=hrf.glover)) quadratic = utils.define('quad', utils.events(t, dt, f=hrf.glover, g=a**2)) cubic = utils.define('cubic', utils.events(t, dt, f=hrf.glover, g=a**3)) f1 = Formula([linear, quadratic, cubic]) # Evaluate this time-based formula at specific times to make the design matrix tval = make_recarray(np.linspace(0,100, 1001), 't') X1 = f1.design(tval, return_float=True) # Now we make a model where the relationship of time between events and signal # is an exponential with a time constant tau l = sympy.Symbol('l') exponential = utils.events(t, dt, f=hrf.glover, g=sympy.exp(-l*a)) f3 = Formula([exponential]) # Make a design matrix by passing in time and required parameters params = make_recarray([(4.5, 3.5)], ('l', '_b0')) X3 = f3.design(tval, params, return_float=True) # the columns or d/d_b0 and d/dl tt = tval.view(np.float64) v1 = np.sum([hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0) v2 = np.sum([-3.5*a*hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0) V = np.array([v1,v2]).T W = V - np.dot(X3, np.dot(np.linalg.pinv(X3), V)) np.testing.assert_almost_equal((W**2).sum() / (V**2).sum(), 0) nipy-0.6.1/examples/formula/simple_contrast.py000077500000000000000000000041751470056100100215440ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ A simple contrast for an FMRI HRF model """ import numpy as np from nipy.algorithms.statistics.api import Formula, make_recarray from nipy.modalities.fmri import hrf, utils from nipy.modalities.fmri.fmristat import hrf as delay # We take event onsets, and a specified HRF model, and make symbolic functions # of time c1 = utils.events([3,7,10], f=hrf.glover) # Symbolic function of time c2 = utils.events([1,3,9], f=hrf.glover) # Symbolic function of time c3 = utils.events([3,4,6], f=delay.spectral[0]) # Symbolic function of time # We can also use a Fourier basis for some other onsets - again making symbolic # functions of time d = utils.fourier_basis([3,5,7]) # Formula # Make a formula for all four sets of onsets f = Formula([c1,c2,c3]) + d # A contrast is a formula expressed on the elements of the design formula contrast = Formula([c1-c2, c1-c3]) # Instantiate actual values of time at which to create the design 
matrix rows t = make_recarray(np.linspace(0,20,50), 't') # Make the design matrix, and get contrast matrices for the design X, c = f.design(t, return_float=True, contrasts={'C':contrast}) # c is a dictionary, containing a 2 by 9 matrix - the F contrast matrix for our # contrast of interest assert X.shape == (50, 9) assert c['C'].shape == (2, 9) # In this case the contrast matrix is rather obvious. np.testing.assert_almost_equal(c['C'], [[1,-1, 0, 0, 0, 0, 0, 0, 0], [1, 0, -1, 0, 0, 0, 0, 0, 0]]) # We can get the design implied by our contrast at our chosen times preC = contrast.design(t, return_float=True) np.testing.assert_almost_equal(preC[:, 0], X[:, 0] - X[:, 1]) np.testing.assert_almost_equal(preC[:, 1], X[:, 0] - X[:, 2]) # So, X . c['C'].T \approx preC np.testing.assert_almost_equal(np.dot(X, c['C'].T), preC) # So what is the matrix C such that preC = X . C? Yes, it's c['C'] C = np.dot(np.linalg.pinv(X), preC).T np.testing.assert_almost_equal(C, c['C']) # The contrast matrix (approx equal to c['C']) print(C) nipy-0.6.1/examples/image_from_array.py000077500000000000000000000024641470056100100201730ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Create a nifti image from a numpy array and an affine transform. """ import numpy as np from nipy import load_image, save_image from nipy.core.api import Image, vox2scanner # This gets the filename for a tiny example file from nipy.testing import anatfile # Load an image to get an array and affine # # Use one of our test files to get an array and affine (as numpy array) from. img = load_image(anatfile) arr = img.get_fdata() affine_array = img.coordmap.affine.copy() # 1) Create a CoordinateMap from the affine transform which specifies # the mapping from input to output coordinates. The ``vox2scanner`` function # makes a coordinate map from voxels to scanner coordinates. Other options are # ``vox2mni`` or ``vox2talairach`` affine_coordmap = vox2scanner(affine_array) # 2) Create a nipy image from the array and CoordinateMap newimg = Image(arr, affine_coordmap) # Save the nipy image to the specified filename save_image(newimg, 'an_image.nii.gz') # Reload and verify the data and affine were saved correctly. img_back = load_image('an_image.nii.gz') assert np.allclose(img_back.get_fdata(), img.get_fdata()) assert np.allclose(img_back.coordmap.affine, img.coordmap.affine) nipy-0.6.1/examples/interfaces/000077500000000000000000000000001470056100100164305ustar00rootroot00000000000000nipy-0.6.1/examples/interfaces/process_ds105.py000077500000000000000000000257021470056100100214050ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Single subject analysis script for SPM / Open FMRI ds105 https://openfmri.org/dataset/ds000105 Download and extract the ds105 archive to some directory. Run this script with:: process_ds105.py ~/data/ds105 where ``~/data/ds105`` is the directory containing the ds105 data. The example uses the very basic MATLAB / SPM interface routines in NIPY. If you need more than very basic use, please consider using nipype. nipype has extended capabilities to interface with external tools and for dataflow management. nipype can handle vanilla SPM in MATLAB or SPM run through the MATLAB common runtime (free from MATLAB Licensing). 
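
Subject numbers can also be given on the command line after the data
directory, to process only those subjects, e.g.::

    process_ds105.py ~/data/ds105 1 2 3

(otherwise subjects 1 through 6 are processed, as in ``main()`` below).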
''' import gzip import sys from copy import deepcopy from glob import glob from os.path import abspath, isfile, splitext from os.path import join as pjoin from warnings import warn import numpy as np import nipy.interfaces.matlab as nimat from nipy.interfaces.spm import ( fltcols, fname_presuffix, fnames_presuffix, make_job, run_jobdef, scans_for_fnames, spm_info, ) # The batch scripts currently need SPM5 nimat.matlab_cmd = 'matlab-spm8 -nodesktop -nosplash' # This definition is partly for slice timing. We can't do slice timing for this # dataset because the slice dimension is the first, and SPM assumes it is the # last. N_SLICES = 40 # X slices STUDY_DEF = { 'TR': 2.5, 'n_slices': N_SLICES, 'time_to_space': (list(range(1, N_SLICES, 2)) + list(range(2, N_SLICES, 2))) } def _sorted_prefer_nii(file_list): """ Strip any filenames ending nii.gz if matching .nii filename in list """ preferred = [] for fname in file_list: if not fname.endswith('.gz'): preferred.append(fname) else: nogz, ext = splitext(fname) if nogz not in file_list: preferred.append(fname) return sorted(preferred) def get_fdata(data_path, subj_id): data_path = abspath(data_path) data_def = {} subject_path = pjoin(data_path, 'sub%03d' % subj_id) functionals = _sorted_prefer_nii( glob(pjoin(subject_path, 'BOLD', 'task*', 'bold*.nii*'))) anatomicals = _sorted_prefer_nii( glob(pjoin(subject_path, 'anatomy', 'highres001.nii*'))) for flist in (anatomicals, functionals): for i, fname in enumerate(flist): nogz, gz_ext = splitext(fname) if gz_ext == '.gz': if not isfile(nogz): contents = gzip.open(fname, 'rb').read() with open(nogz, 'wb') as fobj: fobj.write(contents) flist[i] = nogz if len(anatomicals) == 0: data_def['anatomical'] = None else: data_def['anatomical'] = anatomicals[0] data_def['functionals'] = functionals return data_def def default_ta(tr, nslices): slice_time = tr / float(nslices) return slice_time * (nslices - 1) class SPMSubjectAnalysis: """ Class to preprocess single subject in SPM """ def __init__(self, data_def, study_def, ana_def): self.data_def = deepcopy(data_def) self.study_def = self.add_study_defaults(study_def) self.ana_def = self.add_ana_defaults(deepcopy(ana_def)) def add_study_defaults(self, study_def): full_study_def = deepcopy(study_def) if 'TA' not in full_study_def: full_study_def['TA'] = default_ta( full_study_def['TR'], full_study_def['n_slices']) return full_study_def def add_ana_defaults(self, ana_def): full_ana_def = deepcopy(ana_def) if 'fwhm' not in full_ana_def: full_ana_def['fwhm'] = 8.0 return full_ana_def def slicetime(self, in_prefix='', out_prefix='a'): sess_scans = scans_for_fnames( fnames_presuffix(self.data_def['functionals'], in_prefix)) sdef = self.study_def stinfo = make_job('temporal', 'st', { 'scans': sess_scans, 'so': sdef['time_to_space'], 'tr': sdef['TR'], 'ta': sdef['TA'], 'nslices': float(sdef['n_slices']), 'refslice':1, 'prefix': out_prefix, }) run_jobdef(stinfo) return out_prefix + in_prefix def realign(self, in_prefix=''): sess_scans = scans_for_fnames( fnames_presuffix(self.data_def['functionals'], in_prefix)) rinfo = make_job('spatial', 'realign', [{ 'estimate':{ 'data':sess_scans, 'eoptions':{ 'quality': 0.9, 'sep': 4.0, 'fwhm': 5.0, 'rtm': True, 'interp': 2.0, 'wrap': [0.0,0.0,0.0], 'weight': [] } } }]) run_jobdef(rinfo) return in_prefix def reslice(self, in_prefix='', out_prefix='r', out=('1..n', 'mean')): which = [0, 0] if 'mean' in out: which[1] = 1 if '1..n' in out or 'all' in out: which[0] = 2 elif '2..n' in out: which[0] = 1 sess_scans = scans_for_fnames( 
fnames_presuffix(self.data_def['functionals'], in_prefix)) rsinfo = make_job('spatial', 'realign', [{ 'write':{ 'data': np.vstack(sess_scans.flat), 'roptions':{ 'which': which, 'interp':4.0, 'wrap':[0.0,0.0,0.0], 'mask':True, 'prefix': out_prefix } } }]) run_jobdef(rsinfo) return out_prefix + in_prefix def coregister(self, in_prefix=''): func1 = self.data_def['functionals'][0] mean_fname = fname_presuffix(func1, 'mean' + in_prefix) crinfo = make_job('spatial', 'coreg', [{ 'estimate':{ 'ref': np.asarray(mean_fname, dtype=object), 'source': np.asarray(self.data_def['anatomical'], dtype=object), 'other': [''], 'eoptions':{ 'cost_fun':'nmi', 'sep':[4.0, 2.0], 'tol':np.array( [0.02,0.02,0.02, 0.001,0.001,0.001, 0.01,0.01,0.01, 0.001,0.001,0.001]).reshape(1,12), 'fwhm':[7.0, 7.0] } } }]) run_jobdef(crinfo) return in_prefix def seg_norm(self, in_prefix=''): def_tpms = np.zeros((3,1), dtype=object) spm_path = spm_info.spm_path def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'), def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'), def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii') data = np.zeros((1,), dtype=object) data[0] = self.data_def['anatomical'] sninfo = make_job('spatial', 'preproc', { 'data': data, 'output':{ 'GM':fltcols([0,0,1]), 'WM':fltcols([0,0,1]), 'CSF':fltcols([0,0,0]), 'biascor':1.0, 'cleanup':False, }, 'opts':{ 'tpm':def_tpms, 'ngaus':fltcols([2,2,2,4]), 'regtype':'mni', 'warpreg':1.0, 'warpco':25.0, 'biasreg':0.0001, 'biasfwhm':60.0, 'samp':3.0, 'msk':np.array([], dtype=object), } }) run_jobdef(sninfo) return in_prefix def norm_write(self, in_prefix='', out_prefix='w'): sess_scans = scans_for_fnames( fnames_presuffix(self.data_def['functionals'], in_prefix)) matname = fname_presuffix(self.data_def['anatomical'], suffix='_seg_sn.mat', use_ext=False) subj = { 'matname': np.zeros((1,), dtype=object), 'resample': np.vstack(sess_scans.flat), } subj['matname'][0] = matname roptions = { 'preserve':False, 'bb':np.array([[-78,-112, -50],[78,76,85.0]]), 'vox':fltcols([2.0,2.0,2.0]), 'interp':1.0, 'wrap':[0.0,0.0,0.0], 'prefix': out_prefix, } nwinfo = make_job('spatial', 'normalise', [{ 'write':{ 'subj': subj, 'roptions': roptions, } }]) run_jobdef(nwinfo) # knock out the list of images, replacing with only one subj['resample'] = np.zeros((1,), dtype=object) subj['resample'][0] = self.data_def['anatomical'] roptions['interp'] = 4.0 run_jobdef(nwinfo) return out_prefix + in_prefix def smooth(self, in_prefix='', out_prefix='s'): fwhm = self.ana_def['fwhm'] try: len(fwhm) except TypeError: fwhm = [fwhm] * 3 fwhm = np.asarray(fwhm, dtype=np.float64).reshape(1,3) sess_scans = scans_for_fnames( fnames_presuffix(self.data_def['functionals'], in_prefix)) sinfo = make_job('spatial', 'smooth', {'data':np.vstack(sess_scans.flat), 'fwhm':fwhm, 'dtype':0}) run_jobdef(sinfo) return out_prefix + in_prefix def process_subject(ddef, study_def, ana_def): """ Process subject from subject data dict `ddef` """ if not ddef['anatomical']: warn("No anatomical, aborting processing") return ana = SPMSubjectAnalysis(ddef, study_def, ana_def) # st_prefix = ana.slicetime('') # We can't run slice timing st_prefix = '' ana.realign(in_prefix=st_prefix) ana.reslice(in_prefix=st_prefix, out=('mean',)) ana.coregister(in_prefix=st_prefix) ana.seg_norm() n_st_prefix = ana.norm_write(st_prefix) ana.smooth(n_st_prefix) def get_subjects(data_path, subj_ids, study_def, ana_def): return [get_fdata(data_path, subj_id) for subj_id in subj_ids] def main(): try: data_path = sys.argv[1] except IndexError: raise OSError('Need ds105 data path 
as input')
    if len(sys.argv) > 2:
        subj_ids = [int(id) for id in sys.argv[2:]]
    else:
        subj_ids = range(1, 7)
    for subj_id in subj_ids:
        ddef = get_fdata(data_path, subj_id)
        assert len(ddef['functionals']) in (11, 12)
        process_subject(ddef, STUDY_DEF, {})


if __name__ == '__main__':
    main()
nipy-0.6.1/examples/interfaces/process_fiac.py000077500000000000000000000141071470056100100214500ustar00rootroot00000000000000#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Single subject analysis script for SPM / FIAC '''
import sys
from glob import glob
from os.path import join as pjoin

import numpy as np

from nipy.interfaces.spm import (
    fltcols,
    fname_presuffix,
    fnames_presuffix,
    make_job,
    run_jobdef,
    scans_for_fnames,
    spm_info,
)


def get_fdata(data_path, subj_id):
    data_def = {}
    subject_path = pjoin(data_path, f'fiac{subj_id}')
    data_def['functionals'] = sorted(
        glob(pjoin(subject_path, 'functional_*.nii')))
    anatomicals = glob(pjoin(subject_path, 'anatomical.nii'))
    if len(anatomicals) == 1:
        data_def['anatomical'] = anatomicals[0]
    elif len(anatomicals) == 0:
        data_def['anatomical'] = None
    else:
        raise ValueError('Too many anatomicals')
    return data_def


def slicetime(data_def):
    sess_scans = scans_for_fnames(data_def['functionals'])
    # Concatenate the two interleaved slice orders as lists (ranges cannot
    # be added directly in Python 3)
    stinfo = make_job('temporal', 'st', {
        'scans': sess_scans,
        'so': list(range(1, 31, 2)) + list(range(2, 31, 2)),
        'tr': 2.5,
        'ta': 2.407,
        'nslices': float(30),
        'refslice': 1
    })
    run_jobdef(stinfo)


def realign(data_def):
    sess_scans = scans_for_fnames(
        fnames_presuffix(data_def['functionals'], 'a'))
    rinfo = make_job('spatial', 'realign', [{
        'estimate': {
            'data': sess_scans,
            'eoptions': {
                'quality': 0.9,
                'sep': 4.0,
                'fwhm': 5.0,
                'rtm': True,
                'interp': 2.0,
                'wrap': [0.0, 0.0, 0.0],
                'weight': []
            }
        }
    }])
    run_jobdef(rinfo)


def reslice(data_def):
    sess_scans = scans_for_fnames(
        fnames_presuffix(data_def['functionals'], 'a'))
    rsinfo = make_job('spatial', 'realign', [{
        'write': {
            'data': np.vstack(sess_scans.flat),
            'roptions': {
                'which': [2, 1],
                'interp': 4.0,
                'wrap': [0.0, 0.0, 0.0],
                'mask': True,
            }
        }
    }])
    run_jobdef(rsinfo)


def coregister(data_def):
    func1 = data_def['functionals'][0]
    mean_fname = fname_presuffix(func1, 'meana')
    crinfo = make_job('spatial', 'coreg', [{
        'estimate': {
            'ref': [mean_fname],
            'source': [data_def['anatomical']],
            'other': [[]],
            'eoptions': {
                'cost_fun': 'nmi',
                'sep': [4.0, 2.0],
                'tol': np.array(
                    [0.02, 0.02, 0.02,
                     0.001, 0.001, 0.001,
                     0.01, 0.01, 0.01,
                     0.001, 0.001, 0.001]).reshape(1, 12),
                'fwhm': [7.0, 7.0]
            }
        }
    }])
    run_jobdef(crinfo)


def segnorm(data_def):
    def_tpms = np.zeros((3, 1), dtype=object)
    spm_path = spm_info.spm_path
    def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'),
    def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'),
    def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii')
    data = np.zeros((1,), dtype=object)
    data[0] = data_def['anatomical']
    sninfo = make_job('spatial', 'preproc', {
        'data': data,
        'output': {
            'GM': fltcols([0, 0, 1]),
            'WM': fltcols([0, 0, 1]),
            'CSF': fltcols([0, 0, 0]),
            'biascor': 1.0,
            'cleanup': False,
        },
        'opts': {
            'tpm': def_tpms,
            'ngaus': fltcols([2, 2, 2, 4]),
            'regtype': 'mni',
            'warpreg': 1.0,
            'warpco': 25.0,
            'biasreg': 0.0001,
            'biasfwhm': 60.0,
            'samp': 3.0,
            'msk': np.array([], dtype=object),
        }
    })
    run_jobdef(sninfo)


def norm_write(data_def):
    sess_scans = scans_for_fnames(
        fnames_presuffix(data_def['functionals'], 'a'))
    matname = fname_presuffix(data_def['anatomical'],
                              suffix='_seg_sn.mat',
                              use_ext=False)
    subj = {
        'matname': np.zeros((1,), dtype=object),
        'resample': np.vstack(sess_scans.flat),
    }
    subj['matname'][0] = matname
    roptions = {
'preserve':False, 'bb':np.array([[-78,-112, -50],[78,76,85.0]]), 'vox':fltcols([2.0,2.0,2.0]), 'interp':1.0, 'wrap':[0.0,0.0,0.0], } nwinfo = make_job('spatial', 'normalise', [{ 'write':{ 'subj': subj, 'roptions': roptions, } }]) run_jobdef(nwinfo) # knock out the list of images, replacing with only one subj['resample'] = np.zeros((1,), dtype=object) subj['resample'][0] = data_def['anatomical'] roptions['interp'] = 4.0 run_jobdef(nwinfo) def smooth(data_def, fwhm=8.0): try: len(fwhm) except TypeError: fwhm = [fwhm] * 3 fwhm = np.asarray(fwhm, dtype=np.float64).reshape(1,3) sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'wa')) sinfo = make_job('spatial', 'smooth', {'data':np.vstack(sess_scans.flat), 'fwhm':fwhm, 'dtype':0}) run_jobdef(sinfo) def process_subject(ddef): if not ddef['anatomical']: return slicetime(ddef) realign(ddef) reslice(ddef) coregister(ddef) segnorm(ddef) norm_write(ddef) smooth(ddef) def process_subjects(data_path, subj_ids): for subj_id in subj_ids: ddef = get_fdata(data_path, subj_id) process_subject(ddef) if __name__ == '__main__': try: data_path = sys.argv[1] except IndexError: raise OSError('Need FIAC data path as input') try: subj_ids = sys.argv[2:] except IndexError: subj_ids = range(16) process_subjects(data_path, subj_ids) nipy-0.6.1/examples/labs/000077500000000000000000000000001470056100100152265ustar00rootroot00000000000000nipy-0.6.1/examples/labs/bayesian_structural_analysis.py000077500000000000000000000074621470056100100236020ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This script generates a noisy multi-subject activation image dataset and applies the Bayesian structural analysis on it Requires matplotlib Author : Bertrand Thirion, 2009-2013 """ print(__doc__) import numpy as np import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models.bayesian_structural_analysis import compute_landmarks from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape def display_landmarks_2d(landmarks, hrois, stats): """ Plots the landmarks and associated rois as images""" shape = stats[0].shape n_subjects = len(stats) lmax = 0 grp_map, density = np.zeros(shape), np.zeros(shape) if landmarks is not None: domain = landmarks.domain grp_map = landmarks.map_label(domain.coord, .8, sigma).reshape(shape) density = landmarks.kernel_density(k=None, coord=domain.coord, sigma=sigma).reshape(shape) lmax = landmarks.k + 2 # Figure 1: input data fig_input = plt.figure(figsize=(8, 3.5)) fig_input.text(.5,.9, "Input activation maps", ha='center') vmin, vmax = stats.min(), stats.max() for subject in range(n_subjects): plt.subplot(n_subjects // 5, 5, subject + 1) plt.imshow(stats[subject], interpolation='nearest', vmin=vmin, vmax=vmax) plt.axis('off') # Figure 2: individual hrois fig_output = plt.figure(figsize=(8, 3.5)) fig_output.text(.5, .9, "Individual landmark regions", ha="center") for subject in range(n_subjects): plt.subplot(n_subjects // 5, 5, subject + 1) lw = - np.ones(shape) if hrois[subject].k > 0: nls = hrois[subject].get_roi_feature('label') nls[nls == - 1] = np.size(landmarks) + 2 for k in range(hrois[subject].k): np.ravel(lw)[hrois[subject].label == k] = nls[k] plt.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax) 
plt.axis('off') # Figure 3: Group-level results plt.figure(figsize=(6, 3)) plt.subplot(1, 2, 1) plt.imshow(grp_map, interpolation='nearest', vmin=-1, vmax=lmax) plt.title('group-level position 80% \n confidence regions', fontsize=10) plt.axis('off') plt.colorbar(shrink=.8) plt.subplot(1, 2, 2) plt.imshow(density, interpolation='nearest') plt.title('Spatial density under h1', fontsize=10) plt.axis('off') plt.colorbar(shrink=.8) ############################################################################### # Main script ############################################################################### # generate the data n_subjects = 10 shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([5, 7, 6]) sjitter = 1.0 stats = simul.surrogate_2d_dataset(n_subj=n_subjects, shape=shape, pos=pos, ampli=ampli, width=5.0) # set various parameters threshold = float(st.t.isf(0.01, 100)) sigma = 4. / 1.5 prevalence_threshold = n_subjects * .25 prevalence_pval = 0.9 smin = 5 algorithm = 'co-occurrence' # 'density' domain = grid_domain_from_shape(shape) # get the functional information stats_ = np.array([np.ravel(stats[k]) for k in range(n_subjects)]).T # run the algo landmarks, hrois = compute_landmarks( domain, stats_, sigma, prevalence_pval, prevalence_threshold, threshold, smin, method='prior', algorithm=algorithm) display_landmarks_2d(landmarks, hrois, stats) if landmarks is not None: landmarks.show() plt.show() nipy-0.6.1/examples/labs/blob_extraction.py000077500000000000000000000064271470056100100207720ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This script makes a noisy activation image and extracts the blobs from it. 
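Blobs are extracted as a nested set of supra-threshold regions (threshold 2.0
and minimum size 3 voxels here), and the figure shows the input data, the
blob labels and the blob-averaged signal.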
Requires matplotlib Author : Bertrand Thirion, 2009--2012 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import matplotlib as mpl import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models import hroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape # --------------------------------------------------------- # simulate an activation image # --------------------------------------------------------- shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() values = dataset.ravel() #------------------------------------------------------- # Computations #------------------------------------------------------- # create a domain descriptor associated with this domain = grid_domain_from_shape(shape) nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=2.0, smin=3) # create an average activaion image activation = [values[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) bmap = nroi.feature_to_voxel_map( 'activation', roi=True, method="mean").reshape(shape) #-------------------------------------------------------- # Result display #-------------------------------------------------------- aux1 = (0 - values.min()) / (values.max() - values.min()) aux2 = (bmap.max() - values.min()) / (values.max() - values.min()) cdict = {'red': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.7), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 0.5, 0.5), (1.0, 1.0, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.figure(figsize=(12, 3)) plt.subplot(1, 3, 1) plt.imshow(dataset, interpolation='nearest', cmap=my_cmap) cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.axis('off') plt.title('Thresholded data') # plot the blob label image plt.subplot(1, 3, 2) plt.imshow(nroi.feature_to_voxel_map('id', roi=True).reshape(shape), interpolation='nearest') plt.colorbar() plt.title('Blob labels') # plot the blob-averaged signal image aux = 0.01 cdict = {'red': ((0.0, 0.0, 0.7), (aux, 0.7, 0.7), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 0.5, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.subplot(1, 3, 3) plt.imshow(bmap, interpolation='nearest', cmap=my_cmap) cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.axis('off') plt.title('Blob average') plt.show() nipy-0.6.1/examples/labs/demo_dmtx.py000077500000000000000000000040731470056100100175670ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Examples of design matrices specification and and computation (event-related design, FIR design, etc) Requires matplotlib Author : Bertrand Thirion: 2009-2010 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import ( 
BlockParadigm, EventRelatedParadigm, ) # frame times tr = 1.0 nscans = 128 frametimes = np.linspace(0, (nscans - 1) * tr, nscans) # experimental paradigm conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] hrf_model = 'canonical' motion = np.cumsum(np.random.randn(128, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] #event-related design matrix paradigm = EventRelatedParadigm(conditions, onsets) X1 = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, add_regs=motion, add_reg_names=add_reg_names) # block design matrix duration = 7 * np.ones(9) paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial', drift_order=3) # FIR model paradigm = EventRelatedParadigm(conditions, onsets) hrf_model = 'fir' X3 = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=np.arange(1, 6)) # plot the results fig = plt.figure(figsize=(10, 6)) ax = plt.subplot(1, 3, 1) X1.show(ax=ax) ax.set_title('Event-related design matrix', fontsize=12) ax = plt.subplot(1, 3, 2) X2.show(ax=ax) ax.set_title('Block design matrix', fontsize=12) ax = plt.subplot(1, 3, 3) X3.show(ax=ax) ax.set_title('FIR design matrix', fontsize=12) plt.subplots_adjust(top=0.9, bottom=0.25) plt.show() nipy-0.6.1/examples/labs/example_glm.py000077500000000000000000000061571470056100100201060ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This is an example where: 1. An sequence of fMRI volumes are simulated 2. A design matrix describing all the effects related to the data is computed 3. A GLM is applied to all voxels 4. A contrast image is created Requires matplotlib Author : Bertrand Thirion, 2010 """ print(__doc__) import os import os.path as op import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import Nifti1Image, save import nipy.modalities.fmri.design_matrix as dm from nipy.labs.utils.simul_multisubject_fmri_dataset import surrogate_4d_dataset from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm from nipy.modalities.fmri.glm import GeneralLinearModel ####################################### # Simulation parameters ####################################### # volume mask shape = (20, 20, 20) affine = np.eye(4) # Acquisition parameters: number of scans (n_scans) and volume repetition time # value in seconds n_scans = 128 tr = 2.4 # input paradigm information frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # conditions are 0 1 0 1 0 1 ... 
conditions = np.arange(20) % 2

# 20 onsets (in sec), first event 10 sec after the start of the first scan
onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20)

# model with canonical HRF (could also be 'canonical with derivative' or
# 'fir')
hrf_model = 'canonical'

# fake motion parameters to be included in the model
motion = np.cumsum(np.random.randn(n_scans, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']

########################################
# Design matrix
########################################

paradigm = EventRelatedParadigm(conditions, onsets)
X, names = dm.dmtx_light(frametimes, paradigm, drift_model='cosine',
                         hfcut=128, hrf_model=hrf_model, add_regs=motion,
                         add_reg_names=add_reg_names)

#######################################
# Get the FMRI data
#######################################

fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)[0]

# if you want to save it as an image
data_file = 'fmri_data.nii'
save(fmri_data, data_file)

########################################
# Perform a GLM analysis
########################################

# GLM fit
Y = fmri_data.get_fdata().reshape(np.prod(shape), n_scans)
glm = GeneralLinearModel(X)
glm.fit(Y.T)

# specify the contrast [1 -1 0 ..]
contrast = np.zeros(X.shape[1])
contrast[0] = 1
contrast[1] = -1

# compute the contrast image related to it
zvals = glm.contrast(contrast).z_score()
contrast_image = Nifti1Image(np.reshape(zvals, shape), affine)

# if you want to save the contrast as an image
contrast_path = 'zmap.nii'
save(contrast_image, contrast_path)

print(f'Wrote some of the results as images in directory {op.abspath(os.getcwd())}')

h, c = np.histogram(zvals, 100)

# Show the histogram
plt.figure()
plt.bar(c[:-1], h, width=.1)
plt.title('Histogram of the z-values')
plt.show()
nipy-0.6.1/examples/labs/glm_lowlevel.py000077500000000000000000000021731470056100100202760ustar00rootroot00000000000000#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
__doc__ = """
This example simulates a number of pure Gaussian white noise signals, then
fits each one in terms of two regressors: a constant baseline, and a linear
function of time.  The voxelwise t statistics associated with the baseline
coefficient are then computed.
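
In matrix form the model for each signal is y = X b + e; for a contrast
vector c, the t statistic computed below is (as a standard GLM result)
t = c'b / sqrt(c' cov(b) c), with cov(b) estimated from the residuals.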
""" print(__doc__) import numpy as np from nipy.modalities.fmri.glm import GeneralLinearModel dimt = 100 dimx = 10 dimy = 11 dimz = 12 # axis defines the "time direction" y = np.random.randn(dimt, dimx * dimy * dimz) axis = 0 X = np.array([np.ones(dimt), range(dimt)]) X = X.T ## the design matrix X must have dimt lines mod = GeneralLinearModel(X) mod.fit(y) # Define a t contrast tcon = mod.contrast([1, 0]) # Compute the t-stat t = tcon.stat() ## t = tcon.stat(baseline=1) to test effects > 1 # Compute the p-value p = tcon.p_value() # Compute the z-score z = tcon.z_score() # Perform a F test without keeping the F stat p = mod.contrast([[1, 0], [1, - 1]]).p_value() print(np.shape(y)) print(np.shape(X)) print(np.shape(z)) nipy-0.6.1/examples/labs/group_reproducibility_analysis.py000077500000000000000000000075741470056100100241500ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of script to analyse the reproducibility in group studies using a bootstrap procedure Needs matplotlib Author: Bertrand Thirion, 2005-2009 """ print(__doc__) import numpy as np # Scipy stats needed for thresholding import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models.discrete_domain import grid_domain_from_binary_array from nipy.labs.utils.reproducibility_measures import ( cluster_reproducibility, map_reproducibility, peak_reproducibility, voxel_reproducibility, ) ############################################################################### # Generate the data n_subj = 105 shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([2.5, 3.5, 3]) betas = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=5.0) n_vox = np.prod(shape) # set the variance at 1 everywhere func = np.reshape(betas, (n_subj, n_vox)).T var = np.ones((n_vox, n_subj)) domain = grid_domain_from_binary_array(np.ones((shape[0], shape[1], 1))) ############################################################################### # Run reproducibility analysis ngroups = 10 thresholds = np.arange(.5, 6., .5) sigma = 2.0 csize = 10 niter = 10 method = 'crfx' verbose = 0 # do not use permutations swap = False kap = [] clt = [] pk = [] sens = [] for threshold in thresholds: kwargs={'threshold': threshold, 'csize': csize} kappa = [] cls = [] sent = [] peaks = [] for i in range(niter): k = voxel_reproducibility(func, var, domain, ngroups, method, swap, verbose, **kwargs) kappa.append(k) cld = cluster_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) cls.append(cld) peak = peak_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) peaks.append(peak) seni = map_reproducibility(func, var, domain, ngroups, method, True, verbose, threshold=threshold, csize=csize).mean()/ngroups sent.append(seni) sens.append(np.array(sent)) kap.append(np.array(kappa)) clt.append(np.array(cls)) pk.append(np.array(peaks)) ############################################################################### # Visualize the results aux = st.norm.sf(thresholds) a = plt.figure(figsize=(11, 6)) plt.subplot(1, 3, 1) plt.boxplot(kap) plt.title('voxel-level \n reproducibility', fontsize=12) plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) 
plt.xlabel('threshold') plt.subplot(1, 3, 2) plt.boxplot(clt) plt.title('cluster-level \n reproducibility', fontsize=12) plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) plt.xlabel('threshold') plt.subplot(1, 3, 3) plt.boxplot(pk, notch=1) plt.title('peak-level \n reproducibility', fontsize=12) plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) plt.xlabel('threshold') plt.figure() for q, threshold in enumerate(thresholds): plt.subplot(3, len(thresholds) // 3 + 1, q + 1) rmap = map_reproducibility(func, var, domain, ngroups, method, verbose, threshold=threshold, csize=csize) rmap = np.reshape(rmap, shape) plt.imshow(rmap, interpolation=None, vmin=0, vmax=ngroups) plt.title(f'threshold: {threshold:g}', fontsize=10) plt.axis('off') plt.suptitle('Map reproducibility for different thresholds') plt.show() nipy-0.6.1/examples/labs/hierarchical_rois.py000077500000000000000000000037221470056100100212610ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = \ """ Example of a script that crates a 'hierarchical roi' structure from the blob model of an image Needs matplotlib Author: Bertrand Thirion, 2008-2009 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models import hroi from nipy.labs.spatial_models.discrete_domain import domain_from_binary_array ############################################################################## # simulate the data shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() # create a domain descriptor associated with this domain = domain_from_binary_array(dataset ** 2 > 0) nroi = hroi.HROI_as_discrete_domain_blobs(domain, dataset.ravel(), threshold=2., smin=5) n1 = nroi.copy() nroi.reduce_to_leaves() td = n1.make_forest().depth_from_leaves() root = np.argmax(td) lv = n1.make_forest().get_descendants(root) u = nroi.make_graph().cc() flat_data = dataset.ravel() activation = [flat_data[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) label = np.reshape(n1.label, shape) label_ = np.reshape(nroi.label, shape) # make a figure plt.figure(figsize=(10, 4)) plt.subplot(1, 3, 1) plt.imshow(np.squeeze(dataset)) plt.title('Input map') plt.axis('off') plt.subplot(1, 3, 2) plt.title('Nested Rois') plt.imshow(label, interpolation='Nearest') plt.axis('off') plt.subplot(1, 3, 3) plt.title('Leave Rois') plt.imshow(label_, interpolation='Nearest') plt.axis('off') plt.show() nipy-0.6.1/examples/labs/histogram_fits.py000077500000000000000000000061451470056100100206330ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of a script that performs histogram analysis of an activation image, to estimate activation Z-score with various heuristics: * Gamma-Gaussian model * Gaussian mixture model * Empirical normal null This example is based on a (simplistic) simulated image. 
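Each fit yields, for every voxel, the posterior probability (or an FDR
value, for the empirical null) that the voxel is activated, displayed as a
map next to the raw data.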
Needs matplotlib """ # Author : Bertrand Thirion, Gael Varoquaux 2008-2009 print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.algorithms.statistics.empirical_pvalue as en import nipy.labs.utils.simul_multisubject_fmri_dataset as simul ############################################################################### # simulate the data shape = (60, 60) pos = 2 * np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() fig = plt.figure(figsize=(12, 10)) plt.subplot(3, 3, 1) plt.imshow(dataset, cmap=plt.cm.hot) plt.colorbar() plt.title('Raw data') Beta = dataset.ravel().squeeze() ############################################################################### # fit Beta's histogram with a Gamma-Gaussian mixture gam_gaus_pp = en.gamma_gaussian_fit(Beta, Beta) gam_gaus_pp = np.reshape(gam_gaus_pp, (shape[0], shape[1], 3)) plt.figure(fig.number) plt.subplot(3, 3, 4) plt.imshow(gam_gaus_pp[..., 0], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n first component posterior proba.') plt.colorbar() plt.subplot(3, 3, 5) plt.imshow(gam_gaus_pp[..., 1], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n second component posterior proba.') plt.colorbar() plt.subplot(3, 3, 6) plt.imshow(gam_gaus_pp[..., 2], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n third component posterior proba.') plt.colorbar() ############################################################################### # fit Beta's histogram with a mixture of Gaussians alpha = 0.01 gaus_mix_pp = en.three_classes_GMM_fit(Beta, None, alpha, prior_strength=100) gaus_mix_pp = np.reshape(gaus_mix_pp, (shape[0], shape[1], 3)) plt.figure(fig.number) plt.subplot(3, 3, 7) plt.imshow(gaus_mix_pp[..., 0], cmap=plt.cm.hot) plt.title('Gaussian mixture,\n first component posterior proba.') plt.colorbar() plt.subplot(3, 3, 8) plt.imshow(gaus_mix_pp[..., 1], cmap=plt.cm.hot) plt.title('Gaussian mixture,\n second component posterior proba.') plt.colorbar() plt.subplot(3, 3, 9) plt.imshow(gaus_mix_pp[..., 2], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n third component posterior proba.') plt.colorbar() ############################################################################### # Fit the null mode of Beta with an empirical normal null efdr = en.NormalEmpiricalNull(Beta) emp_null_fdr = efdr.fdr(Beta) emp_null_fdr = emp_null_fdr.reshape(shape) plt.subplot(3, 3, 3) plt.imshow(1 - emp_null_fdr, cmap=plt.cm.hot) plt.colorbar() plt.title('Empirical FDR\n ') plt.show() nipy-0.6.1/examples/labs/multi_subject_parcellation.py000077500000000000000000000036061470056100100232160ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This script contains a quick demo on a multi-subject parcellation on a toy 2D example. Note how the middle parcels adapt to the individual configuration. 
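The parcellation is obtained with
``nipy.labs.spatial_models.hierarchical_parcellation.hparcel``; the ``mu``
parameter sets the 'stiffness' of the parcellation, i.e. the trade-off
between spatial regularity across subjects and fit to the individual data.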
Needs matplotlib """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.spatial_models.discrete_domain as dom import nipy.labs.spatial_models.hierarchical_parcellation as hp import nipy.labs.utils.simul_multisubject_fmri_dataset as simul # step 1: generate some synthetic data n_subj = 10 shape = (60, 60) pos = 3 * np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([5, 7, 6]) sjitter = 6.0 dataset = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=10.0) # dataset represents 2D activation images from n_subj subjects, # step 2 : prepare all the information for the parcellation nbparcel = 10 ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1)) domain = dom.grid_domain_from_shape(shape) # step 3 : run the algorithm Pa = hp.hparcel(domain, ldata, nbparcel, mu=3.0) # note: play with mu to change the 'stiffness of the parcellation' # step 4: look at the results Label = np.array([np.reshape(Pa.individual_labels[:, s], shape) for s in range(n_subj)]) plt.figure(figsize=(8, 4)) plt.title('Input data') for s in range(n_subj): plt.subplot(2, 5, s + 1) plt.imshow(dataset[s], interpolation='nearest') plt.axis('off') plt.figure(figsize=(8, 4)) plt.title('Resulting parcels') for s in range(n_subj): plt.subplot(2, 5, s+1) plt.imshow(Label[s], interpolation='nearest', vmin=-1, vmax=nbparcel) plt.axis('off') plt.show() nipy-0.6.1/examples/labs/need_data/000077500000000000000000000000001470056100100171325ustar00rootroot00000000000000nipy-0.6.1/examples/labs/need_data/bayesian_structural_analysis.py000077500000000000000000000031101470056100100254700ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of a script that uses the BSA (Bayesian Structural Analysis) i.e. nipy.labs.spatial_models.bayesian_structural_analysis module. Author : Bertrand Thirion, 2008-2013 """ print(__doc__) #autoindent from os import getcwd, mkdir, path # Local import from get_data_light import DATA_DIR, get_second_level_dataset from numpy import array from scipy import stats from nipy.labs.spatial_models.bsa_io import make_bsa_image # Get the data nbsubj = 12 nbeta = 29 data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nbsubj)] betas = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nbsubj)] missing_file = array([not path.exists(m) for m in mask_images + betas]).any() if missing_file: get_second_level_dataset() # set various parameters subj_id = ['%04d' % i for i in range(12)] threshold = float(stats.t.isf(0.01, 100)) sigma = 4. 
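# The parameters below are passed on to make_bsa_image: sigma is the spatial
# scale of the model, while (to the best of our understanding -- treat this
# reading as an assumption) prevalence_threshold and prevalence_pval control
# how many subjects must show a region, and with what confidence, for it to
# be reported as a group-level landmark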
prevalence_threshold = 2 prevalence_pval = 0.95 smin = 5 write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) algorithm = 'density' print('algorithm used:', algorithm) # call the function landmarks, individual_rois = make_bsa_image( mask_images, betas, threshold, smin, sigma, prevalence_threshold, prevalence_pval, write_dir, algorithm=algorithm, contrast_id='%04d' % nbeta) print(f"Wrote all the results in directory {write_dir}") nipy-0.6.1/examples/labs/need_data/demo_blob_from_image.py000077500000000000000000000047471470056100100236320ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This script generates a noisy activation image and extracts the blob from it. This creates as output - a label image representing the nested blobs, - an image of the average signal per blob and - an image with the terminal blob only Author : Bertrand Thirion, 2009 """ #autoindent from os import getcwd, mkdir, path # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import Nifti1Image, load, save from nipy.labs.spatial_models import hroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image # data paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') if not path.exists(input_image): get_second_level_dataset() write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # parameters threshold = 3.0 # blob-forming threshold smin = 5 # size threshold on blobs # prepare the data nim = load(input_image) mask_image = Nifti1Image((nim.get_fdata() ** 2 > 0).astype('u8'), nim.affine) domain = grid_domain_from_image(mask_image) data = nim.get_fdata() values = data[data != 0] # compute the nested roi object nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=threshold, smin=smin) # compute region-level activation averages activation = [values[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) average_activation = nroi.representative_feature('activation') # saving the blob image,i. e. a label image descrip = f"blob image extracted from {input_image}" wim = nroi.to_image('id', roi=True, descrip=descrip) save(wim, path.join(write_dir, "blob.nii")) # saving the image of the average-signal-per-blob descrip = f"blob average signal extracted from {input_image}" wim = nroi.to_image('activation', roi=True, descrip=descrip) save(wim, path.join(write_dir, "bmap.nii")) # saving the image of the end blobs or leaves lroi = nroi.copy() lroi.reduce_to_leaves() descrip = f"blob image extracted from {input_image}" wim = lroi.to_image('id', roi=True, descrip=descrip) save(wim, path.join(write_dir, "leaves.nii")) print(f"Wrote the blob image in {path.join(write_dir, 'blob.nii')}") print(f"Wrote the blob-average signal image in {path.join(write_dir, 'bmap.nii')}") print(f"Wrote the end-blob image in {path.join(write_dir, 'leaves.nii')}") nipy-0.6.1/examples/labs/need_data/demo_roi.py000077500000000000000000000067531470056100100213170ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This is a little demo that simply shows ROI manipulation within the nipy framework. 
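ROIs are built in several ways: from a ball around a given position, from
individual blobs of an activation image, from the blob closest to a given
position, and from all blobs at once; the resulting masks are saved as Nifti
images in a 'results' directory.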
Needs matplotlib Author: Bertrand Thirion, 2009-2010 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import load, save from nipy.labs.spatial_models import hroi, mroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image # paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') mask_image = path.join(DATA_DIR, 'mask.nii.gz') if (not path.exists(input_image)) or (not path.exists(mask_image)): get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # ----------------------------------------------------- # example 1: create the ROI from a given position # ----------------------------------------------------- position = np.array([[0, 0, 0]]) domain = grid_domain_from_image(mask_image) roi = mroi.subdomain_from_balls(domain, position, np.array([5.0])) roi_domain = domain.mask(roi.label > -1) dom_img = roi_domain.to_image() save(dom_img, path.join(write_dir, "myroi.nii")) print(f"Wrote an ROI mask image in {path.join(write_dir, 'myroi.nii')}") # ---------------------------------------------------- # ---- example 2: create ROIs from a blob image ------ # ---------------------------------------------------- # --- 2.a create the blob image # parameters threshold = 3.0 # blob-forming threshold smin = 10 # size threshold on blobs # prepare the data nim = load(input_image) affine = nim.affine shape = nim.shape data = nim.get_fdata() values = data[data != 0] # compute the nested roi object nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=threshold, smin=smin) # saving the blob image, i.e.
a label image wim = nroi.to_image('id', roi=True) descrip = f"blob image extracted from {input_image}" blobPath = path.join(write_dir, "blob.nii") save(wim, blobPath) # --- 2.b take blob having id "132" as an ROI roi = nroi.copy() roi.select_roi([132]) wim2 = roi.to_image() roi_path_2 = path.join(write_dir, "roi_blob_1.nii") save(wim2, roi_path_2) # --- 2.c take the blob closest to 'position' as an ROI roi = mroi.subdomain_from_position_and_image(wim, position[0]) wim3 = roi.to_image() roi_path_3 = path.join(write_dir, "blob_closest_to_%d_%d_%d.nii" % (position[0][0], position[0][1], position[0][2])) save(wim3, roi_path_3) # --- 2.d make a set of ROIs from all the blobs roi = mroi.subdomain_from_image(blobPath) data = load(input_image).get_fdata().ravel() feature_activ = [data[roi.select_id(id, roi=False)] for id in roi.get_id()] roi.set_feature('activ', feature_activ) roi.plot_feature('activ') wim4 = roi.to_image() roi_path_4 = path.join(write_dir, "roi_all_blobs.nii") save(wim4, roi_path_4) # ---- 2.e the same, a bit more complex valid_roi = roi.get_id()[roi.representative_feature('activ') > 4.0] roi.select_roi(valid_roi) wim5 = roi.to_image() roi_path_5 = path.join(write_dir, "roi_some_blobs.nii") save(wim5, roi_path_5) print(f"Wrote ROI mask images in {roi_path_2}, \n {roi_path_3} \n {roi_path_4} \n and {roi_path_5}") plt.show() nipy-0.6.1/examples/labs/need_data/demo_ward_clustering.py000077500000000000000000000024221470056100100237070ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This shows the effect of Ward clustering on a real fMRI dataset Author: Bertrand Thirion, 2010 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import Nifti1Image, load, save from nipy.algorithms.graph.field import Field # paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') mask_image = path.join(DATA_DIR, 'mask.nii.gz') if (not path.exists(mask_image)) or (not path.exists(input_image)): get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # read the data mask = load(mask_image).get_fdata() > 0 ijk = np.array(np.where(mask)).T nvox = ijk.shape[0] data = load(input_image).get_fdata()[mask] image_field = Field(nvox) image_field.from_3d_grid(ijk, k=6) image_field.set_field(data) u, _ = image_field.ward(100) # write the results label_image = path.join(write_dir, 'label.nii') wdata = mask.astype(np.int16) - 1 # cast the boolean mask before subtracting wdata[mask] = u save(Nifti1Image(wdata, load(mask_image).affine), label_image) print(f"Label image written in {label_image}") nipy-0.6.1/examples/labs/need_data/example_roi_and_glm.py000077500000000000000000000151371470056100100235010ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This is an example where: 1. A sequence of fMRI volumes are loaded 2. An ROI mask is loaded 3. A design matrix describing all the effects related to the data is computed 4. A GLM is applied to all voxels in the ROI 5. A summary of the results is provided for certain contrasts 6. A plot of the HRF is provided for the mean response in each ROI 7.
Fitted/adjusted response plots are provided Needs matplotlib Author : Bertrand Thirion, 2010 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import load, save from nipy.labs.spatial_models import mroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image from nipy.labs.utils.simul_multisubject_fmri_dataset import surrogate_4d_dataset from nipy.modalities.fmri.design_matrix import dmtx_light from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm from nipy.modalities.fmri.glm import GeneralLinearModel ####################################### # Simulation parameters ####################################### # volume mask mask_path = path.join(DATA_DIR, 'mask.nii.gz') if not path.exists(mask_path): get_second_level_dataset() mask = load(mask_path) mask_array, affine = mask.get_fdata() > 0, mask.affine # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) conditions = np.arange(20) % 2 onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20) # in seconds hrf_model = 'canonical' motion = np.cumsum(np.random.randn(n_scans, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) ######################################## # Design matrix ######################################## paradigm = EventRelatedParadigm(conditions, onsets) X, names = dmtx_light(frametimes, paradigm, drift_model='cosine', hfcut=128, hrf_model=hrf_model, add_regs=motion, add_reg_names=add_reg_names) ######################################## # Create ROIs ######################################## positions = np.array([[60, -30, 5], [50, 27, 5]]) # in mm (here in the MNI space) radii = np.array([8, 6]) domain = grid_domain_from_image(mask) my_roi = mroi.subdomain_from_balls(domain, positions, radii) # to save an image of the ROIs save(my_roi.to_image(), path.join(write_dir, "roi.nii")) ####################################### # Get the FMRI data ####################################### fmri_data = surrogate_4d_dataset(mask=mask, dmtx=X)[0] Y = fmri_data.get_fdata()[mask_array] # artificially add signal in the ROIs to make the example more meaningful activation = 30 * (X.T[1] + .5 * X.T[0]) for (position, radius) in zip(positions, radii): Y[((domain.coord - position) ** 2).sum(1) < radius ** 2 + 1] += activation ######################################## # Perform a GLM analysis ######################################## # GLM fit glm = GeneralLinearModel(X) glm.fit(Y.T) # specify the contrast [1 -1 0 ..]
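# (Editorial note, added for clarity.) The hstack below builds the vector
# [1, -1, 0, ..., 0]: weight +1 on the first condition regressor, -1 on the
# second, and zeros over the motion and drift columns, so it tests the
# difference between the two experimental conditions. An equivalent, more
# explicit construction would be:
#
#     contrast_alt = np.zeros(X.shape[1])
#     contrast_alt[:2] = [1, -1]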
contrast = np.hstack((1, -1, np.zeros(X.shape[1] - 2))) # compute the contrast image related to it zvals = glm.contrast(contrast).z_score() ######################################## # ROI-based analysis ######################################## # extract the time courses within the ROIs signal_feature = [Y[my_roi.select_id(id, roi=False)] for id in my_roi.get_id()] my_roi.set_feature('signal', signal_feature) # ROI average time courses my_roi.set_roi_feature('signal_avg', my_roi.representative_feature('signal')) # roi-level contrast average contrast_feature = [zvals[my_roi.select_id(id, roi=False)] for id in my_roi.get_id()] my_roi.set_feature('contrast', contrast_feature) my_roi.set_roi_feature('contrast_avg', my_roi.representative_feature('contrast')) ######################################## # GLM analysis on the ROI average time courses ######################################## n_reg = len(names) roi_tc = my_roi.get_roi_feature('signal_avg') glm.fit(roi_tc.T) plt.figure() plt.subplot(1, 2, 1) betas = glm.get_beta() b1 = plt.bar(np.arange(n_reg - 1), betas[:-1, 0], width=.4, color='blue', label='region 1') b2 = plt.bar(np.arange(n_reg - 1) + 0.3, betas[:-1, 1], width=.4, color='red', label='region 2') plt.xticks(np.arange(n_reg - 1), names[:-1], fontsize=10) plt.legend() plt.title('Parameter estimates \n for the roi time courses') bx = plt.subplot(1, 2, 2) my_roi.plot_feature('contrast', bx) ######################################## # fitted and adjusted response ######################################## res = np.hstack([x.resid for x in glm.results_.values()]).T betas = np.hstack([x.theta for x in glm.results_.values()]) proj = np.eye(n_reg) proj[2:] = 0 fit = np.dot(np.dot(betas.T, proj), X.T) # plot it plt.figure() for k in range(my_roi.k): plt.subplot(my_roi.k, 1, k + 1) plt.plot(fit[k]) plt.plot(fit[k] + res[k], 'r') plt.xlabel('time (scans)') plt.legend(('effects', 'adjusted')) ########################################### # hrf for the two conditions ############################################ fir_order = 6 X_fir, _ = dmtx_light( frametimes, paradigm, hrf_model='fir', drift_model='cosine', drift_order=3, fir_delays=np.arange(fir_order), add_regs=motion, add_reg_names=add_reg_names) glm_fir = GeneralLinearModel(X_fir) plt.figure() for k in range(my_roi.k): # fit a glm on the ROI's time course glm_fir.fit(roi_tc[k]) # access to the corresponding result structure res = list(glm_fir.results_.values())[0] # only one value in this case plt.subplot(1, my_roi.k, k + 1) # get the confidence intervals for the effects and plot them -condition 0 conf_int = res.conf_int(cols=np.arange(fir_order)).squeeze() yerr = (conf_int[:, 1] - conf_int[:, 0]) / 2 plt.errorbar(np.arange(fir_order), conf_int.mean(1), yerr=yerr) # get the confidence intervals for the effects and plot them -condition 1 conf_int = res.conf_int(cols=np.arange(fir_order, 2 * fir_order)).squeeze() yerr = (conf_int[:, 1] - conf_int[:, 0]) / 2 plt.errorbar(np.arange(fir_order), conf_int.mean(1), yerr=yerr) plt.legend(('condition c0', 'condition c1')) plt.title('estimated hrf shape') plt.xlabel('time (scans)') plt.show() nipy-0.6.1/examples/labs/need_data/first_level_fiac.py000077500000000000000000000074601470056100100230160ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Script that performs a first-level analysis of the FIAC dataset. See ``examples/fiac/fiac_example.py`` for another approach to this analysis.
Needs the *example data* package. Also needs matplotlib Author: Alexis Roche, Bertrand Thirion, 2009--2012 """ from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.glm import FMRILinearModel from nipy.utils import example_data # ----------------------------------------------------------- # --------- Get the data ----------------------------------- #----------------------------------------------------------- fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') def make_fiac_contrasts(p): """Specify some contrasts for the FIAC experiment Parameters ========== p: int, the number of columns of the design matrix (for all sessions) """ con = {} # the design matrices of both runs comprise 13 columns # the first 5 columns of the design matrices correspond to the following # conditions: ["SSt-SSp", "SSt-DSp", "DSt-SSp", "DSt-DSp", "FirstSt"] def length_p_vector(con, p): return np.hstack((con, np.zeros(p - len(con)))) con["SStSSp_minus_DStDSp"] = length_p_vector([1, 0, 0, - 1], p) con["DStDSp_minus_SStSSp"] = length_p_vector([- 1, 0, 0, 1], p) con["DSt_minus_SSt"] = length_p_vector([- 1, - 1, 1, 1], p) con["DSp_minus_SSp"] = length_p_vector([- 1, 1, - 1, 1], p) con["DSt_minus_SSt_for_DSp"] = length_p_vector([0, - 1, 0, 1], p) con["DSp_minus_SSp_for_DSt"] = length_p_vector([0, 0, - 1, 1], p) con["Deactivation"] = length_p_vector([- 1, - 1, - 1, - 1, 4], p) con["Effects_of_interest"] = np.eye(p)[:5] return con # compute fixed effects of the two runs and compute related images n_regressors = np.load(design_files[0])['X'].shape[1] # note: implicitly assume the same shape for all sessions ! 
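# (Editorial illustration, added; assumes p = 13 columns as in these FIAC
# designs.) `length_p_vector` above simply zero-pads a short contrast to the
# full width of the design matrix, e.g.:
#
#     np.hstack(([1, 0, 0, -1], np.zeros(13 - 4)))
#     # -> array([1., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0., 0.])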
contrasts = make_fiac_contrasts(n_regressors) # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print('Computing contrasts...') mean_map = multi_session_model.means[0] # for display for index, (contrast_id, contrast_val) in enumerate(contrasts.items()): print(' Contrast % 2i out of %i: %s' % ( index + 1, len(contrasts), contrast_id)) z_image_path = path.join(write_dir, f'{contrast_id}_z_map.nii') z_map, = multi_session_model.contrast( [contrast_val] * 2, con_id=contrast_id, output_z=True) save(z_map, z_image_path) # make a snapshot of the contrast activation if contrast_id == 'Effects_of_interest': vmax = max(- z_map.get_fdata().min(), z_map.get_fdata().max()) vmin = - vmax plot_map(z_map.get_fdata(), z_map.affine, anat=mean_map.get_fdata(), anat_affine=mean_map.affine, cmap=cm.cold_hot, vmin=vmin, vmax=vmax, figure=10, threshold=2.5, black_bg=True) plt.savefig(path.join(write_dir, f'{contrast_id}_z_map.png')) print(f"All the results were written in {write_dir}") plt.show() nipy-0.6.1/examples/labs/need_data/get_data_light.py000077500000000000000000000067041470056100100224550ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Get two images from the web (one mask image and one spmT image) and put them in the nipy user dir - usually therefore at ``~/.nipy/tests/data``. Author : Bertrand Thirion, 2009 """ import os try: from urllib2 import urlopen # Python 2 except ImportError: from urllib.request import urlopen # Python 3 import tarfile from nibabel.data import get_nipy_user_dir NIPY_DIR = get_nipy_user_dir() DATA_DIR = os.path.join(NIPY_DIR, 'tests', 'data') def get_second_level_dataset(): """ Lightweight dataset for multi-subject analysis """ # define several paths url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy' mask_image = os.path.join(DATA_DIR, 'mask.nii.gz') input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz') group_data = os.path.join(DATA_DIR, 'group_t_images.tar.gz') # if needed create DATA_DIR if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR) assert os.path.exists(DATA_DIR) # download mask_image if necessary if not os.path.exists(mask_image): filename = 'mask.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(mask_image, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download input_image if necessary if not os.path.exists(input_image): filename = 'spmT_0029.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(input_image, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download group_data if necessary if not os.path.exists(group_data): filename = 'group_t_images.tar.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(group_data, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # unpack the group_data archive tar = tarfile.open(group_data) tar.extractall(DATA_DIR) tar.close() os.remove(group_data) return DATA_DIR def get_first_level_dataset(): """ Heavier dataset (30 MB) for first-level analysis """ # define several paths url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy' raw_fmri = os.path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm = os.path.join(DATA_DIR, 'localizer_paradigm.csv') # create DATA_DIR if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR) assert os.path.exists(DATA_DIR) # download paradigm file if
necessary if not os.path.exists(paradigm): print('Downloading paradigm file, this may take time') datafile = os.path.join(url, 'localizer_paradigm.csv') fp = urlopen(datafile) local_file = open(paradigm, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download raw_fmri if necessary if not os.path.exists(raw_fmri): print('Downloading fmri image, this may take time') filename = 's12069_swaloc1_corr.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(raw_fmri, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() return DATA_DIR if __name__ == '__main__': get_second_level_dataset() nipy-0.6.1/examples/labs/need_data/glm_beta_and_variance.py000077500000000000000000000071261470056100100237610ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = \ """ This example shows how to get the beta and variance estimates from a nipy GLM. More specifically: 1. A sequence of fMRI volumes are loaded. 2. A design matrix describing all the effects related to the data is computed. 3. A GLM is applied to the dataset, effect and variance images are produced. Note that this corresponds to a single run. Needs matplotlib Author : Bertrand Thirion, 2010--2012 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_first_level_dataset from nibabel import Nifti1Image, save from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import load_paradigm_from_csv_file from nipy.modalities.fmri.glm import FMRILinearModel ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # confounds hrf_model = 'canonical' drift_model = "cosine" hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print(f'Computation will be performed in directory: {write_dir}') ######################################## # Design matrix ######################################## print('Loading design matrix...') # the example examples/labs/write_paradigm_file.py shows how to create this file paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') plt.savefig(path.join(write_dir, 'design_matrix.png')) dim = design_matrix.matrix.shape[1] ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################## # Output beta and variance images ######################################## beta_hat = fmri_glm.glms[0].get_beta() # Least-squares estimates of the beta
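# (Editorial sketch, not part of the original analysis.) For a plain OLS fit
# the betas above would equal pinv(X) @ Y, where X is the design matrix and Y
# is a hypothetical array of masked time series (the actual data are held
# inside fmri_glm). The 'ar1' model prewhitens the data first, so a NumPy
# cross-check such as
#
#     beta_ols = np.linalg.pinv(design_matrix.matrix) @ Y
#
# would only agree approximately with `beta_hat`.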
variance_hat = fmri_glm.glms[0].get_mse() # Estimates of the variance mask = fmri_glm.mask.get_fdata() > 0 # output beta images beta_map = np.tile(mask.astype(np.float64)[..., np.newaxis], dim) beta_map[mask] = beta_hat.T beta_image = Nifti1Image(beta_map, fmri_glm.affine) beta_image.header['descrip'] = ( 'Parameter estimates of the localizer dataset') save(beta_image, path.join(write_dir, 'beta.nii')) print(f"Beta image written in {write_dir}") variance_map = mask.astype(np.float64) variance_map[mask] = variance_hat # Create a snapshot of the variance image vmax = np.log(variance_hat.max()) plot_map(np.log(variance_map + .1), fmri_glm.affine, cmap=cm.hot_black_bone, vmin=np.log(0.1), vmax=vmax, anat=None, threshold=.1, alpha=.9) plt.show() nipy-0.6.1/examples/labs/need_data/group_reproducibility_analysis.py000077500000000000000000000076371470056100100260520ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of script to analyse the reproducibility in group studies using a bootstrap procedure. This reproduces approximately the work described in 'Analysis of a large fMRI cohort: Statistical and methodological issues for group analyses' Thirion B, Pinel P, Meriaux S, Roche A, Dehaene S, Poline JB. Neuroimage. 2007 Mar;35(1):105-20. Needs matplotlib Author: Bertrand Thirion, 2005-2009 """ from os import getcwd, mkdir, path from numpy import array try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nipy.labs.utils.reproducibility_measures import group_reproducibility_metrics print('This analysis takes a long while, please be patient') ############################################################################## # Set the paths, data, etc.
############################################################################## nsubj = 12 nbeta = 29 data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nsubj)] stat_images = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nsubj)] contrast_images = [path.join(data_dir, 'con_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nsubj)] all_images = mask_images + stat_images + contrast_images missing_file = array([not path.exists(m) for m in all_images]).any() if missing_file: get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) ############################################################################## # main script ############################################################################## ngroups = [4] thresholds = [3.0, 4.0, 5.0] sigma = 6.0 csize = 10 niter = 10 method = 'crfx' verbose = 0 swap = False voxel_results, cluster_results, peak_results = group_reproducibility_metrics( mask_images, contrast_images, [], thresholds, ngroups, method, cluster_threshold=csize, number_of_samples=niter, sigma=sigma, do_clusters=True, do_voxels=True, do_peaks=True, swap=swap) kap = list(voxel_results[ngroups[0]].values()) clt = list(cluster_results[ngroups[0]].values()) pk = list(peak_results[ngroups[0]].values()) ############################################################################## # plot ############################################################################## plt.figure() plt.subplot(1, 3, 1) plt.boxplot(kap) plt.title('voxel-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') plt.subplot(1, 3, 2) plt.boxplot(clt) plt.title('cluster-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') plt.subplot(1, 3, 3) plt.boxplot(pk) plt.title('peak-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') ############################################################################## # create an image ############################################################################## """ # this is commented until a new version of the code allows it # with the adequate level of abstraction th = 4.0 swap = False kwargs = {'threshold':th,'csize':csize} rmap = map_reproducibility(Functional, VarFunctional, grp_mask, ngroups, method, swap, verbose, **kwargs) wmap = mask.astype(np.int_) wmap[mask] = rmap wim = Nifti1Image(wmap, affine) wim.get_header()['descrip']= 'reproducibility map at threshold %f, \ cluster size %d'%(th,csize) wname = path.join(write_dir,'repro.nii') save(wim, wname) print('Wrote a reproducibility image in %s'%wname) """ nipy-0.6.1/examples/labs/need_data/histogram_fits.py000077500000000000000000000037071470056100100225400ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of a script that performs histogram analysis of an activation image. This is based on a real fMRI image. Simply modify the input image path to make it work on your preferred image.
Needs matplotlib Author : Bertrand Thirion, 2008-2009 """ import os import numpy as np import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import load import nipy.algorithms.statistics.empirical_pvalue as en # parameters verbose = 1 theta = float(st.t.isf(0.01, 100)) # paths mask_image = os.path.join(DATA_DIR, 'mask.nii.gz') input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz') if (not os.path.exists(mask_image)) or (not os.path.exists(input_image)): get_second_level_dataset() # Read the mask nim = load(mask_image) mask = nim.get_fdata() # read the functional image rbeta = load(input_image) beta = rbeta.get_fdata() beta = beta[mask > 0] mf = plt.figure(figsize=(13, 5)) a1 = plt.subplot(1, 3, 1) a2 = plt.subplot(1, 3, 2) a3 = plt.subplot(1, 3, 3) # fit beta's histogram with a Gamma-Gaussian mixture bfm = np.array([2.5, 3.0, 3.5, 4.0, 4.5]) bfp = en.gamma_gaussian_fit(beta, bfm, verbose=1, mpaxes=a1) # fit beta's histogram with a mixture of Gaussians alpha = 0.01 pstrength = 100 bfq = en.three_classes_GMM_fit(beta, bfm, alpha, pstrength, verbose=1, mpaxes=a2) # fit the null mode of beta with the robust method efdr = en.NormalEmpiricalNull(beta) efdr.learn() efdr.plot(bar=0, mpaxes=a3) a1.set_title('Fit of the density with \n a Gamma-Gaussian mixture') a2.set_title('Fit of the density with \n a mixture of Gaussians') a3.set_title('Robust fit of the density \n with a single Gaussian') plt.show() nipy-0.6.1/examples/labs/need_data/localizer_glm_ar.py000077500000000000000000000123471470056100100230230ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Full step-by-step example of fitting a GLM to experimental data and visualizing the results. More specifically: 1. A sequence of fMRI volumes are loaded 2. A design matrix describing all the effects related to the data is computed 3. a mask of the useful brain volume is computed 4. A GLM is applied to the dataset (effect/covariance, then contrast estimation) Note that this corresponds to a single run. 
Needs matplotlib Author : Bertrand Thirion, 2010--2012 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_first_level_dataset from nibabel import save from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import load_paradigm_from_csv_file from nipy.modalities.fmri.glm import FMRILinearModel ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans) # confounds hrf_model = 'canonical with derivative' drift_model = "cosine" hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print(f'Computation will be performed in directory: {write_dir}') ######################################## # Design matrix ######################################## print('Loading design matrix...') paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') plt.savefig(path.join(write_dir, 'design_matrix.png')) ######################################### # Specify the contrasts ######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts[f'{design_matrix.names[2 * i]}'] = np.eye(n_columns)[2 * i] # and more complex/ interesting ones contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\ contrasts["calculaudio"] + contrasts["phraseaudio"] contrasts["video"] = contrasts["clicDvideo"] + contrasts["clicGvideo"] + \ contrasts["calculvideo"] + contrasts["phrasevideo"] contrasts["left"] = contrasts["clicGaudio"] + contrasts["clicGvideo"] contrasts["right"] = contrasts["clicDaudio"] + contrasts["clicDvideo"] contrasts["computation"] = contrasts["calculaudio"] + contrasts["calculvideo"] contrasts["sentences"] = contrasts["phraseaudio"] + contrasts["phrasevideo"] contrasts["H-V"] = contrasts["damier_H"] - contrasts["damier_V"] contrasts["V-H"] = contrasts["damier_V"] - contrasts["damier_H"] contrasts["left-right"] = contrasts["left"] - contrasts["right"] contrasts["right-left"] = contrasts["right"] - contrasts["left"] contrasts["audio-video"] = contrasts["audio"] - contrasts["video"] contrasts["video-audio"] = contrasts["video"] - contrasts["audio"] contrasts["computation-sentences"] = contrasts["computation"] - \ contrasts["sentences"] contrasts["reading-visual"] = contrasts["sentences"] * 2 - \ contrasts["damier_H"] - contrasts["damier_V"] contrasts['effects_of_interest'] = np.eye(n_columns)[:20:2] ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts 
######################################### print('Computing contrasts...') for index, (contrast_id, contrast_val) in enumerate(contrasts.items()): print(' Contrast % 2i out of %i: %s' % (index + 1, len(contrasts), contrast_id)) # save the z_image image_path = path.join(write_dir, f'{contrast_id}_z_map.nii') z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True) save(z_map, image_path) # Create snapshots of the contrasts vmax = max(- z_map.get_fdata().min(), z_map.get_fdata().max()) if index > 0: plt.clf() plot_map(z_map.get_fdata(), z_map.affine, cmap=cm.cold_hot, vmin=- vmax, vmax=vmax, anat=None, cut_coords=None, slicer='z', black_bg=True, # looks much better this way figure=10, threshold=2.5) plt.savefig(path.join(write_dir, f'{contrast_id}_z_map.png')) print(f"All the results were written in {write_dir}") plt.show() nipy-0.6.1/examples/labs/need_data/one_sample_t_test.py000077500000000000000000000052601470056100100232160ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of a one-sample t-test using the GLM formalism. This script takes individual contrast images and masks and runs a simple GLM. This can be readily generalized to any design matrix. This particular example shows the statistical map of a contrast related to a computation task (subtraction of computation task minus sentence reading/listening). Needs matplotlib. Author : Bertrand Thirion, 2012 """ print(__doc__) #autoindent from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import Nifti1Image, concat_images, load, save from nipy.labs.mask import intersect_masks from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.glm import FMRILinearModel # Get the data n_subjects = 12 n_beta = 29 data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(n_subjects)] betas = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (n_beta, n)) for n in range(n_subjects)] missing_files = np.array([not path.exists(m) for m in mask_images + betas]) if missing_files.any(): get_second_level_dataset() write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # Compute a population-level mask as the intersection of individual masks grp_mask = Nifti1Image(intersect_masks(mask_images).astype(np.int8), load(mask_images[0]).affine) # concatenate the individual images first_level_image = concat_images(betas) # set the model design_matrix = np.ones(len(betas))[:, np.newaxis] # only the intercept grp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask) # GLM fitting using ordinary least squares grp_model.fit(do_scaling=False, model='ols') # specify and estimate the contrast contrast_val = np.array([[1]]) # the only possible contrast !
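# (Editorial note, added.) Because the design matrix is a single column of
# ones, the contrast [[1]] tests whether the mean effect over subjects is
# nonzero, i.e. this GLM reduces to a voxel-wise one-sample t-test. A minimal
# sketch, with a hypothetical per-subject effects array `eff` of shape
# (n_subjects, n_voxels):
#
#     t_manual = eff.mean(0) / (eff.std(0, ddof=1) / np.sqrt(eff.shape[0]))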
z_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True) # write the results save(z_map, path.join(write_dir, 'one_sample_z_map.nii')) # look at the result vmax = max(- z_map.get_fdata().min(), z_map.get_fdata().max()) vmin = - vmax plot_map(z_map.get_fdata(), z_map.affine, cmap=cm.cold_hot, vmin=vmin, vmax=vmax, threshold=3., black_bg=True) plt.savefig(path.join(write_dir, 'one_sample_z_map.png')) plt.show() print(f"Wrote all the results in directory {write_dir}") nipy-0.6.1/examples/labs/need_data/parcel_intra.py000077500000000000000000000027111470056100100221530ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of script to parcellate the data from one subject, using various algorithms. Note that it can take some time. Author: Bertrand Thirion, 2005-2009 """ print(__doc__) from os import getcwd, mkdir, path # Local import from get_data_light import DATA_DIR, get_second_level_dataset from numpy import array from nipy.labs.spatial_models.parcel_io import fixed_parcellation # ------------------------------------ # Get the data (mask+functional image) # take several experimental conditions # time courses could be used instead n_beta = [29] mask_image = path.join(DATA_DIR, 'mask.nii.gz') betas = [path.join(DATA_DIR, 'spmT_%04d.nii.gz' % n) for n in n_beta] missing_file = array([not path.exists(m) for m in [mask_image] + betas]).any() if missing_file: get_second_level_dataset() # set the parameters n_parcels = 500 mu = 10 nn = 6 verbose = 1 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'gkm', write_dir, mu, verbose) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'ward', write_dir, mu, verbose) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'ward_and_gkm', write_dir, mu, verbose) nipy-0.6.1/examples/labs/need_data/parcel_multisubj.py000077500000000000000000000040211470056100100230500ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of script to parcellate multi-subject data. May take some time to complete.
Author: Bertrand Thirion, 2005-2009 """ from os import getcwd, mkdir, path # Local import from get_data_light import DATA_DIR, get_second_level_dataset from numpy import array from nipy.labs.spatial_models.hierarchical_parcellation import hparcel from nipy.labs.spatial_models.parcel_io import ( parcel_input, parcellation_based_analysis, write_parcellation_images, ) # Get the data nb_subj = 12 subj_id = ['subj_%02d' % s for s in range(nb_subj)] nbeta = '0029' data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nb_subj)] learn_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n)) for n in range(nb_subj)] missing_file = array( [not path.exists(m) for m in mask_images + learn_images]).any() learn_images = [[m] for m in learn_images] if missing_file: get_second_level_dataset() # parameter for the intersection of the mask ths = .5 # number of parcels nbparcel = 200 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # prepare the parcel structure domain, ldata = parcel_input(mask_images, learn_images, ths) # run the algorithm fpa = hparcel(domain, ldata, nbparcel, verbose=1) # produce some output images write_parcellation_images(fpa, subject_id=subj_id, swd=write_dir) # do some parcellation-based analysis: # take some test images whose parcel-based signal needs to be assessed test_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n)) for n in range(nb_subj)] # compute and write the parcel-based statistics rfx_path = path.join(write_dir, f'prfx_{nbeta}.nii') parcellation_based_analysis(fpa, test_images, 'one_sample', rfx_path=rfx_path) print(f"Wrote everything in {write_dir}") nipy-0.6.1/examples/labs/need_data/permutation_test.py000077500000000000000000000027631470056100100231250ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of onesample permutation test Needs *example data* package """ import numpy as np from nipy.labs.group.permutation_test import permutation_test_onesample from nipy.utils import example_data # Get group data group_data = example_data.get_filename('neurospin', 'language_babies', 'offset_002.npz') f = np.load(group_data) data, vardata, xyz = f['mat'], f['var'], f['xyz'] # Create one-sample permutation test instance ptest = permutation_test_onesample(data, xyz, stat_id='wilcoxon') # Cluster definition: (threshold, diameter) # Note that a list of definitions can be passed to ptest.calibrate cluster_def = (ptest.height_threshold(0.01), None) print(cluster_def) # Multiple calibration # To get accurate pvalues, don't pass nperms (default is 1e4) # Yet it will take longer to run voxel_res, cluster_res, region_res = ptest.calibrate(nperms=100, clusters=[cluster_def]) # Simulated Zmax values for FWER correction simu_zmax = ptest.zscore(voxel_res['perm_maxT_values']) # Output regions ## This is a list because several cluster definitions can be accepted clusters = cluster_res[0] sizes = clusters['size_values'] clusters_Pcorr = clusters['size_Corr_p_values'] # Simulated cluster sizes simu_s = clusters['perm_size_values'] simu_smax = clusters['perm_maxsize_values'] nipy-0.6.1/examples/labs/need_data/plot_registration.py000077500000000000000000000017541470056100100232660ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set 
ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of plotting a registration checker with nipy.labs visualization tools The idea is to represent the anatomical image to be checked with an overlay of the edges of the reference image. This idea is borrowed from FSL. Needs the *templates* data package. Needs matplotlib. """ print(__doc__) try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs import viz from nipy.labs.viz_tools import anat_cache # Get the data. Here we are using the reference T1 image anat, affine, _ = anat_cache._AnatCache.get_anat() # Here we use the same image as a reference. As a result it is perfectly # aligned. reference = anat reference_affine = affine slicer = viz.plot_anat(anat, affine, dim=.2, black_bg=True) slicer.edge_map(reference, reference_affine) plt.show() nipy-0.6.1/examples/labs/need_data/tmin_statistic.py000077500000000000000000000076221470056100100225540ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example where the result of the min of two contrasts is computed and displayed. This is based on the Localizer dataset, in which we want to find the regions activated both in left and right finger tapping. Notes ----- This is the valid conjunction test discussed in: Nichols T, Brett M, Andersson J, Wager T, Poline JB. Valid conjunction inference with the minimum statistic. Neuroimage. 2005 Apr 15;25(3):653-60. Needs matplotlib Author : Bertrand Thirion, 2012 """ print(__doc__) from os import getcwd, mkdir, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_first_level_dataset from nibabel import save from nipy.labs.viz import cm, plot_map from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import load_paradigm_from_csv_file from nipy.modalities.fmri.glm import FMRILinearModel ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # confounds hrf_model = 'canonical' drift_model = 'cosine' hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print(f'Computation will be performed in directory: {write_dir}') ######################################## # Design matrix ######################################## print('Loading design matrix...') paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ######################################### # Specify the contrasts ######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts[f'{design_matrix.names[i]}'] = np.eye(n_columns)[i] # and more complex/ interesting ones contrasts['left'] = contrasts['clicGaudio'] + contrasts['clicGvideo'] contrasts['right'] = contrasts['clicDaudio'] + contrasts['clicDvideo'] 
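# (Editorial note, added.) 'left' and 'right' pool the audio- and video-cued
# motor regressors for each hand; the conjunction computed below then asks
# where *both* pooled effects are positive, using the minimum-statistic test
# ('tmin-conjunction') of Nichols et al. (2005) cited in the docstring.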
######################################## # Perform a GLM analysis ######################################## print('Fitting a General Linear Model') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### contrast_id = 'left_right_motor_min' z_map, effects_map = fmri_glm.contrast( np.vstack((contrasts['left'], contrasts['right'])), contrast_type='tmin-conjunction', output_z=True, output_effects=True) z_image_path = path.join(write_dir, f'{contrast_id}_z_map.nii') save(z_map, z_image_path) contrast_path = path.join(write_dir, f'{contrast_id}_con.nii') save(effects_map, contrast_path) # note that the effects_map is two-dimensional: # these dimensions correspond to 'left' and 'right' # Create snapshots of the contrasts vmax = max(- z_map.get_fdata().min(), z_map.get_fdata().max()) plot_map(z_map.get_fdata(), fmri_glm.affine, cmap=cm.cold_hot, vmin=- vmax, vmax=vmax, anat=None, figure=10, threshold=2.5) plt.savefig(path.join(write_dir, f'{contrast_id}_z_map.png')) plt.show() print(f'All the results were written in {write_dir}') # Note: fancier visualizations of the results are shown # in the viz3d example nipy-0.6.1/examples/labs/need_data/viz.py000077500000000000000000000035171470056100100203250ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Example of activation image visualization with nipy.labs visualization tools Needs *example data* package. Needs matplotlib """ print(__doc__) import os.path try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import get_second_level_dataset from nibabel import load from nipy.labs import viz from nipy.utils import example_data # get the data data_dir = get_second_level_dataset() # First example, with an anatomical template img = load(os.path.join(data_dir, 'spmT_0029.nii.gz')) data = img.get_fdata() affine = img.affine viz.plot_map(data, affine, cut_coords=(-52, 10, 22), threshold=2.0, cmap=viz.cm.cold_hot) plt.savefig('ortho_view.png') # Second example, with a given anatomical image slicing in the Z direction try: anat_img = load(example_data.get_filename('neurospin', 'sulcal2000', 'nobias_anubis.nii.gz')) anat = anat_img.get_fdata() anat_affine = anat_img.affine except OSError as e: # File does not exist: the data package is not installed print(e) anat = None anat_affine = None viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='z', threshold=2, cmap=viz.cm.cold_hot, black_bg=True) plt.savefig('z_view.png') viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='x', threshold=2, cmap=viz.cm.cold_hot, black_bg=True) plt.savefig('x_view.png') viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='y', threshold=2, cmap=viz.cm.cold_hot, black_bg=True) plt.savefig('y_view.png') plt.show() nipy-0.6.1/examples/labs/need_data/viz3d.py000077500000000000000000000034121470056100100205460ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This example performs different kinds of (2D and 3D) plots of a given activation map. Needs matplotlib.
Author : Bertrand Thirion, 2012 """ print(__doc__) from os import path try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") # Local import from get_data_light import DATA_DIR, get_second_level_dataset from nibabel import load from nipy.labs.viz import cm, plot_map ####################################### # Data and analysis parameters ####################################### input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') if not path.exists(input_image): get_second_level_dataset() brain_map = load(input_image) vmin, vmax = brain_map.get_fdata().min(), brain_map.get_fdata().max() # make a simple 2D plot plot_map(brain_map.get_fdata(), brain_map.affine, cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, figure=10, threshold=3) # More plots using 3D if True: # replace with False to skip this plot_map(brain_map.get_fdata(), brain_map.affine, cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, figure=11, threshold=3, do3d=True) from nipy.labs import viz3d try: viz3d.plot_map_3d(brain_map.get_fdata(), brain_map.affine, cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, threshold=4) except ImportError: print("Need mayavi for 3D visualization") plt.show() nipy-0.6.1/examples/labs/onesample_group.py000077500000000000000000000032171470056100100210050ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module launches a one-sample test on a dataset. Statistical significance is obtained using cluster-level inference and permutation testing. Author: Alexis Roche, Bertrand Thirion, 2009-2012 """ import numpy as np from nibabel import Nifti1Image as Image import nipy.labs.statistical_mapping as sm from nipy.utils import example_data def remake_images(): # Get group data group_data = example_data.get_filename( 'neurospin', 'language_babies', 'offset_002.npz') f = np.load(group_data) data, vardata, xyz = f['mat'], f['var'], f['xyz'] dX = xyz[0].max() + 1 dY = xyz[1].max() + 1 dZ = xyz[2].max() + 1 aux = np.zeros([dX, dY, dZ]) data_images = [] vardata_images = [] mask_images = [] indices = tuple(xyz) for i in range(data.shape[0]): aux[indices] = data[i] data_images.append(Image(aux.copy(), np.eye(4))) aux[indices] = vardata[i] vardata_images.append(Image(aux.copy(), np.eye(4))) aux[indices] = 1 mask_images.append(aux) return data_images, vardata_images, mask_images data_images, vardata_images, mask_images = remake_images() zimg, mask, nulls = sm.onesample_test(data_images, None, mask_images, 'wilcoxon', permutations=1024, cluster_forming_th=0.01) clusters, info = sm.cluster_stats(zimg, mask, 0.01, nulls=nulls) nipy-0.6.1/examples/labs/permutation_test_fakedata.py000077500000000000000000000070611470056100100230350ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example script for group permutation testing """ import numpy as np from nipy.labs.group import permutation_test as PT def make_data(n=10, mask_shape=(10, 10, 10), axis=0, r=3, signal=5): """ Generate Gaussian noise in a cubic volume plus a spherical activation region """ mask = np.zeros(mask_shape, int) XYZ = np.array(np.where(mask==0)) p = XYZ.shape[1] data = np.random.randn(n, p) I = np.where(np.square(XYZ - XYZ.max(axis=1).reshape(-1, 1) / 2).sum( axis=0) <= r ** 2)[0] data[:, I] += signal vardata = np.random.randn(n, p) ** 2 if axis == 1: data = data.T
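# (editorial comment) axis == 1 transposes the outputs so that rows index
# voxels rather than subjects; vardata is transposed below for consistency.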
vardata = vardata.T return data, vardata, XYZ ############################################################################### # Example for using permutation_test_onesample class data, vardata, XYZ = make_data() # rfx calibration P = PT.permutation_test_onesample(data, XYZ) # clusters definition (height threshold, max diameter) c = [(P.random_Tvalues[int(P.ndraws * (0.95))], None)] # regions definition (label vector) r = np.ones(data.shape[1], int) r[int(data.shape[1] / 2.):] *= 10 voxel_results, cluster_results, region_results = \ P.calibrate(nperms=100, clusters=c, regions=[r]) # mfx calibration P = PT.permutation_test_onesample(data, XYZ, vardata=vardata, stat_id="student_mfx") voxel_results, cluster_results, region_results = \ P.calibrate(nperms=100, clusters=c, regions=[r]) ############################################################################### # Example for using permutation_test_twosample class data, vardata, XYZ = make_data(n=20) data1, vardata1, data2, vardata2 = (data[:10], vardata[:10], data[10:], vardata[10:]) # rfx calibration P = PT.permutation_test_twosample(data1, data2, XYZ) c = [(P.random_Tvalues[int(P.ndraws * (0.95))], None)] voxel_results, cluster_results, region_results = P.calibrate(nperms=100, clusters=c) # mfx calibration P = PT.permutation_test_twosample(data1, data2, XYZ, vardata1=vardata1, vardata2=vardata2, stat_id="student_mfx") voxel_results, cluster_results, region_results = P.calibrate(nperms=100, clusters=c) ############################################################################### # Print cluster statistics level = 0.05 for results in cluster_results: nclust = results["labels"].max() + 1 Tmax = np.zeros(nclust, float) Tmax_P = np.zeros(nclust, float) Diam = np.zeros(nclust, int) for j in range(nclust): I = np.where(results["labels"]==j)[0] Tmax[j] = P.Tvalues[I].max() Tmax_P[j] = voxel_results["Corr_p_values"][I].min() Diam[j]= PT.max_dist(XYZ, I, I) J = np.where(1 - (results["size_Corr_p_values"] > level) * (results["Fisher_Corr_p_values"] > level) * (Tmax_P > level))[0] print("\nDETECTED CLUSTERS STATISTICS:\n") print("Cluster detection threshold:", round(results["thresh"], 2)) if results["diam"] is not None: print("minimum cluster diameter", results["diam"]) print("Cluster level FWER controlled at", level) for j in J: X, Y, Z = results["peak_XYZ"][:, j] strXYZ = str(X).zfill(2) + " " + str(Y).zfill(2) + " " + \ str(Z).zfill(2) nipy-0.6.1/examples/labs/two_sample_mixed_effects.py000077500000000000000000000013101470056100100226350ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ Demo two sample mixed effect models Needs matplotlib """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs.group import twosample n1 = 8 n2 = 8 y1 = np.random.rand(n1) v1 = .1 * np.random.rand(n1) y2 = np.random.rand(n2) v2 = .1 * np.random.rand(n2) nperms = twosample.count_permutations(n1, n2) magics = np.arange(nperms) t = twosample.stat_mfx(y1, v1, y2, v2, id='student_mfx', Magics=magics) plt.hist(t, 101) plt.show() nipy-0.6.1/examples/labs/watershed_labeling.py000077500000000000000000000063641470056100100214370ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __doc__ = """ This script generates a noisy 
activation image and performs a watershed segmentation in it. Needs matplotlib Author : Bertrand Thirion, 2009--2012 """ #autoindent print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import matplotlib as mpl import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape from nipy.labs.spatial_models.hroi import HROI_from_watershed ############################################################################### # data simulation shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) x = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() th = 2.36 # compute the field structure and perform the watershed domain = grid_domain_from_shape(shape) nroi = HROI_from_watershed(domain, np.ravel(x), threshold=th) label = nroi.label # compute the region-based signal average bfm = np.array([np.mean(x.ravel()[label == k]) for k in range(label.max() + 1)]) bmap = np.zeros(x.size) if label.max() > - 1: bmap[label > - 1] = bfm[label[label > - 1]] label = np.reshape(label, shape) bmap = np.reshape(bmap, shape) ############################################################################### # plot the input image aux1 = (0 - x.min()) / (x.max() - x.min()) aux2 = (bmap.max() - x.min()) / (x.max() - x.min()) cdict = {'red': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.7), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 0.5, 0.5), (1.0, 1.0, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.figure(figsize=(12, 3)) plt.subplot(1, 3, 1) plt.imshow(np.squeeze(x), interpolation='nearest', cmap=my_cmap) plt.axis('off') plt.title('Thresholded image') cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) ############################################################################### # plot the watershed label image plt.subplot(1, 3, 2) plt.imshow(label, interpolation='nearest') plt.axis('off') plt.colorbar() plt.title('Labels') ############################################################################### # plot the watershed-average image plt.subplot(1, 3, 3) aux = 0.01 cdict = {'red': ((0.0, 0.0, 0.7), (aux, 0.7, 0.7), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 0.5, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.imshow(bmap, interpolation='nearest', cmap=my_cmap) plt.axis('off') plt.title('Label-average') cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.show() nipy-0.6.1/examples/labs/write_paradigm_file.py000077500000000000000000000043531470056100100216050ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of paradigm .csv file generation: the neurospin/localizer paradigm.
See Pinel et al., BMC neuroscience 2007 for reference """ import csv import sys import numpy as np # onset times in milliseconds time = np.array([ 0, 2400, 5700, 8700, 11400, 15000, 18000, 20700, 23700, 26700, 29700, 33000, 35400, 39000, 41700, 44700, 48000, 50700, 53700, 56400, 59700, 62400, 66000, 69000, 71400, 75000, 78000, 80400, 83400, 87000, 89700, 93000, 96000, 99000, 102000, 105000, 108000, 110400, 113700, 116700, 119400, 122700, 125400, 129000, 131400, 135000, 137700, 140400, 143400, 146700, 149400, 153000, 156000, 159000, 162000, 164400, 167700, 170400, 173700, 176700, 179700, 182700, 186000, 188400, 191700, 195000, 198000, 201000, 203700, 207000, 210000, 212700, 215700, 218700, 221400, 224700, 227700, 230700, 234000, 236700, 240000, 243000, 246000, 248400, 251700, 254700, 257400, 260400, 264000, 266700, 269700, 272700, 275400, 278400, 281700, 284400, 288000, 291000, 293400, 296700]).astype('f')/1000 # corresponding onset types # Because it's cutpasted from Matlab(tm), i subtract 1 at the end ;-) # onset types trial_type = np.array([ 8, 8, 11, 1, 3, 10, 5, 10, 4, 6, 10, 2, 7, 9, 9, 7, 7, 11, 11, 9, 1, 4, 11, 5, 6, 9, 11, 11, 7, 3, 10, 11, 2, 11, 11, 11, 7, 11, 11, 6, 10, 2, 8, 11, 9, 7, 7, 2, 3, 10, 1, 8, 2, 9, 3, 8, 9, 4, 7, 1, 11, 11, 11, 1, 7, 9, 8, 8, 2, 2, 2, 6, 6, 1, 8, 1, 5, 3, 8, 10, 11, 11, 9, 1, 7, 4, 4, 8, 2, 1, 1, 11, 5, 2, 11, 10, 9, 5, 10, 10]) - 1 condition_ids = ['damier_H', 'damier_V', 'clicDaudio', 'clicGaudio', 'clicDvideo', 'clicGvideo', 'calculaudio', 'calculvideo', 'phrasevideo', 'phraseaudio'] time = time[trial_type < 10] cid = np.array([condition_ids[i] for i in trial_type[trial_type < 10]]) sess = np.zeros(np.size(time)).astype('int8') pdata = np.vstack((sess, cid, time)).T csvfile = 'localizer_paradigm.csv' fid = open(csvfile, "w", newline = '') writer = csv.writer(fid, delimiter=' ') for row in pdata: writer.writerow(row) fid.close() print(f"Created the paradigm file in {csvfile} ") nipy-0.6.1/examples/parcel_group_analysis.py000077500000000000000000000044061470056100100212530ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Example running a parcel-based second-level analysis from a set of first-level effect images. This script takes as input a directory path that contains first-level images in nifti format, as well as a group mask image and a parcellation image (such as the AAL atlas, 'ROI_MNI_V4.nii', see http://www.gin.cnrs.fr/spip.php?article217). All images are assumed to be in a common reference space, e.g. the MNI/Talairach space. It outputs three images: * tmap.nii.gz, a `t-statistic` image similar to a SPM-like second-level t-map, except it is derived under an assumption of localization uncertainty in reference space. * parcel_mu.nii.gz, an image that maps each voxel to the estimated population effect in the parcel it belongs to. * parcel_prob.nii.gz, an image that maps each voxel to the probability that the population effect in the parcel it belongs to is positive-valued. See the `nipy.algorithms.group.ParcelAnalysis` class for more general usage information. """ from argparse import ArgumentParser from glob import glob from os.path import join from nipy import load_image from nipy.algorithms.group import parcel_analysis # Parse command line description = 'Run a parcel-based second-level analysis from a set of\ first-level effect images.' 
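# A hypothetical invocation, for illustration only (the paths are made up):
#
#   python parcel_group_analysis.py /data/group/con_images \
#       group_mask.nii ROI_MNI_V4.nii
#
# This globs all '*.nii' files under the first directory and writes
# tmap.nii.gz, parcel_mu.nii.gz and parcel_prob.nii.gz to the current
# working directory, as described in the module docstring.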
parser = ArgumentParser(description=description) parser.add_argument('con_path', metavar='con_path', help='directory where 1st-level images are to be found') parser.add_argument('msk_file', metavar='msk_file', help='group mask file') parser.add_argument('parcel_file', metavar='parcel_file', help='parcellation image file') args = parser.parse_args() # Load first-level images con_files = glob(join(args.con_path, '*.nii')) con_imgs = [load_image(f) for f in con_files] # Load group mask msk_img = load_image(args.msk_file) # Load parcellation parcel_img = load_image(args.parcel_file) # Run parcel analysis and write output images in the current directory effect_img, proba_img = parcel_analysis(con_imgs, parcel_img, msk_img=msk_img, fwhm=8, res_path='.') nipy-0.6.1/examples/space_time_realign.py000077500000000000000000000035651470056100100205050ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This script requires the nipy-data package to run. It is an example of simultaneous motion correction and slice timing correction in multi-session fMRI data from the FIAC 2005 dataset. Specifically, it uses the first two sessions of subject 'fiac0'. Usage: python space_time_realign.py Two images will be created in the working directory for the realigned series:: rarun1.nii rarun2.nii Author: Alexis Roche, 2009. """ import os from os.path import abspath from os.path import split as psplit import numpy as np from nipy import load_image, save_image from nipy.algorithms.registration import SpaceTimeRealign from nipy.utils import example_data # Input images are provided with the nipy-data package runnames = [example_data.get_filename('fiac', 'fiac0', run + '.nii.gz') for run in ('run1', 'run2')] runs = [load_image(run) for run in runnames] # Spatio-temporal realigner assuming interleaved ascending slice order R = SpaceTimeRealign(runs, tr=2.5, slice_times='asc_alt_2', slice_info=2) # If you are not sure what the above is doing, you can alternatively # declare slice times explicitly using the following equivalent code """ tr = 2.5 nslices = runs[0].shape[2] slice_times = (tr / float(nslices)) *\ np.argsort(range(0, nslices, 2) + range(1, nslices, 2)) print('Slice times: %s' % slice_times) R = SpaceTimeRealign(runs, tr=tr, slice_times=slice_times, slice_info=2) """ # Estimate motion within- and between-sessions R.estimate(refscan=None) # Resample data on a regular space+time lattice using 4d interpolation # Save images cwd = abspath(os.getcwd()) print(f'Saving results in: {cwd}') for i in range(len(runs)): corr_run = R.resample(i) fname = 'ra' + psplit(runnames[i])[1] save_image(corr_run, fname) nipy-0.6.1/examples/tissue_classification.py000077500000000000000000000061621470056100100212560ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Script example of tissue classification """ from argparse import ArgumentParser import numpy as np from nipy import load_image, save_image from nipy.algorithms.segmentation import BrainT1Segmentation from nipy.core.image.image_spaces import make_xyz_image, xyz_affine def fuzzy_dice(gold_ppm, ppm, mask): """ Fuzzy dice index. 
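    For each tissue class k, this computes

        2 * sum(sqrt(p_k * q_k)) / (sum(p_k) + sum(q_k))

    over the voxels selected by `mask`, a fuzzy generalization of the
    Dice overlap between the gold-standard and estimated probability
    maps.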
""" dices = np.zeros(3) if gold_ppm is None: return dices for k in range(3): pk = gold_ppm[mask][:, k] qk = ppm[mask][:, k] PQ = np.sum(np.sqrt(np.maximum(pk * qk, 0))) P = np.sum(pk) Q = np.sum(qk) dices[k] = 2 * PQ / float(P + Q) return dices # Parse command line description = 'Perform brain tissue classification from skull stripped T1 \ image in CSF, GM and WM. If no mask image is provided, the mask is defined by \ thresholding the input image above zero (strictly).' parser = ArgumentParser(description=description) parser.add_argument('img', metavar='img', nargs='+', help='input image') parser.add_argument('--mask', dest='mask', help='mask image') parser.add_argument('--niters', dest='niters', help='number of iterations (default=%d)' % 25) parser.add_argument('--beta', dest='beta', help=f'Markov random field beta parameter (default={0.5:f})') parser.add_argument('--ngb_size', dest='ngb_size', help='Markov random field neighborhood system (default=%d)' % 6) parser.add_argument('--probc', dest='probc', help='csf probability map') parser.add_argument('--probg', dest='probg', help='gray matter probability map') parser.add_argument('--probw', dest='probw', help='white matter probability map') args = parser.parse_args() def get_argument(dest, default): val = args.__getattribute__(dest) if val is None: return default else: return val # Input image img = load_image(args.img[0]) # Input mask image mask_img = get_argument('mask', None) if mask_img is None: mask_img = img else: mask_img = load_image(mask_img) # Other optional arguments niters = int(get_argument('niters', 25)) beta = float(get_argument('beta', 0.5)) ngb_size = int(get_argument('ngb_size', 6)) # Perform tissue classification mask = mask_img.get_fdata() > 0 S = BrainT1Segmentation(img.get_fdata(), mask=mask, model='5k', niters=niters, beta=beta, ngb_size=ngb_size) # Save label image outfile = 'hard_classif.nii' save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'), outfile) print(f'Label image saved in: {outfile}') # Compute fuzzy Dice indices if a 3-class fuzzy model is provided if args.probc is not None and \ args.probg is not None and \ args.probw is not None: print('Computing Dice index') gold_ppm = np.zeros(S.ppm.shape) gold_ppm_img = (args.probc, args.probg, args.probw) for k in range(3): img = load_image(gold_ppm_img[k]) gold_ppm[..., k] = img.get_fdata() d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_fdata() > 0)) print(f'Fuzzy Dice indices: {d}') nipy-0.6.1/lib/000077500000000000000000000000001470056100100132355ustar00rootroot00000000000000nipy-0.6.1/lib/fff/000077500000000000000000000000001470056100100137765ustar00rootroot00000000000000nipy-0.6.1/lib/fff/fff_array.c000066400000000000000000000474701470056100100161150ustar00rootroot00000000000000#include "fff_array.h" #include #include /* Static functions */ static double _get_uchar(const char* data, size_t pos); static double _get_schar(const char* data, size_t pos); static double _get_ushort(const char* data, size_t pos); static double _get_sshort(const char* data, size_t pos); static double _get_uint(const char* data, size_t pos); static double _get_int(const char* data, size_t pos); static double _get_ulong(const char* data, size_t pos); static double _get_long(const char* data, size_t pos); static double _get_float(const char* data, size_t pos); static double _get_double(const char* data, size_t pos); static void _set_uchar(char* data, size_t pos, double value); static void _set_schar(char* data, size_t pos, double value); static void _set_ushort(char* 
data, size_t pos, double value); static void _set_sshort(char* data, size_t pos, double value); static void _set_uint(char* data, size_t pos, double value); static void _set_int(char* data, size_t pos, double value); static void _set_ulong(char* data, size_t pos, double value); static void _set_long(char* data, size_t pos, double value); static void _set_float(char* data, size_t pos, double value); static void _set_double(char* data, size_t pos, double value); static void _fff_array_iterator_update1d(void* it); static void _fff_array_iterator_update2d(void* it); static void _fff_array_iterator_update3d(void* it); static void _fff_array_iterator_update4d(void* it); /* Creates a C-contiguous array. */ fff_array* fff_array_new(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ, size_t dimT) { fff_array* thisone; size_t nvoxels = dimX*dimY*dimZ*dimT; size_t aux, offX, offY, offZ, offT; /* Offset computation */ offT = 1; aux = dimT; offZ = aux; aux *= dimZ; offY = aux; aux *= dimY; offX = aux; /* Instantiate the structure member */ thisone = (fff_array*)malloc(sizeof(fff_array)); if (thisone==NULL) { FFF_ERROR("Out of memory", ENOMEM); return NULL; } /* Set dimensions, offsets and accessors */ *thisone = fff_array_view(datatype, NULL, dimX, dimY, dimZ, dimT, offX, offY, offZ, offT); /* Gives ownership */ thisone->owner = 1; /* Allocate the image buffer */ switch(datatype) { case FFF_UCHAR: { unsigned char* buf = (unsigned char*)calloc(nvoxels, sizeof(unsigned char)); thisone->data = (void*)buf; } break; case FFF_SCHAR: { signed char* buf = (signed char*)calloc(nvoxels, sizeof(signed char)); thisone->data = (void*)buf; } break; case FFF_USHORT: { unsigned short* buf = (unsigned short*)calloc(nvoxels, sizeof(unsigned short)); thisone->data = (void*)buf; } break; case FFF_SSHORT: { signed short* buf = (signed short*)calloc(nvoxels, sizeof(signed short)); thisone->data = (void*)buf; } break; case FFF_UINT: { unsigned int* buf = (unsigned int*)calloc(nvoxels, sizeof(unsigned int)); thisone->data = (void*)buf; } break; case FFF_INT: { int* buf = (int*)calloc(nvoxels, sizeof(int)); thisone->data = (void*)buf; } break; case FFF_ULONG: { unsigned long int* buf = (unsigned long int*)calloc(nvoxels, sizeof(unsigned long int)); thisone->data = (void*)buf; } break; case FFF_LONG: { long int* buf = (long int*)calloc(nvoxels, sizeof(long int)); thisone->data = (void*)buf; } break; case FFF_FLOAT: { float* buf = (float*)calloc(nvoxels, sizeof(float)); thisone->data = (void*)buf; } break; case FFF_DOUBLE: { double* buf = (double*)calloc(nvoxels, sizeof(double)); thisone->data = (void*)buf; } break; default: FFF_ERROR("Unrecognized data type", EINVAL); break; } /* Report error if array has not been allocated */ if (thisone->data==NULL) FFF_ERROR("Out of memory", ENOMEM); return thisone; } void fff_array_delete(fff_array* thisone) { if ((thisone->owner) && (thisone->data != NULL)) free(thisone->data); free(thisone); return; } fff_array fff_array_view(fff_datatype datatype, void* buf, size_t dimX, size_t dimY, size_t dimZ, size_t dimT, size_t offX, size_t offY, size_t offZ, size_t offT) { fff_array thisone; fff_array_ndims ndims = FFF_ARRAY_4D; unsigned int nbytes = fff_nbytes(datatype); /* Decrease the number of dimensions if applicable */ if (dimT == 1) { ndims = FFF_ARRAY_3D; if (dimZ == 1) { ndims = FFF_ARRAY_2D; if (dimY == 1) ndims = FFF_ARRAY_1D; } } thisone.ndims = ndims; /* Set dimensions / offsets / voxel size */ thisone.dimX = dimX; thisone.dimY = dimY; thisone.dimZ = dimZ; thisone.dimT = 
dimT; thisone.offsetX = offX; thisone.offsetY = offY; thisone.offsetZ = offZ; thisone.offsetT = offT; thisone.byte_offsetX = nbytes*offX; thisone.byte_offsetY = nbytes*offY; thisone.byte_offsetZ = nbytes*offZ; thisone.byte_offsetT = nbytes*offT; /* Set data type and point towards buffer */ thisone.datatype = datatype; thisone.data = buf; thisone.owner = 0; /* Set accessors */ switch(datatype) { case FFF_UCHAR: { thisone.get = &_get_uchar; thisone.set = &_set_uchar; } break; case FFF_SCHAR: { thisone.get = &_get_schar; thisone.set = &_set_schar; } break; case FFF_USHORT: { thisone.get = &_get_ushort; thisone.set = &_set_ushort; } break; case FFF_SSHORT: { thisone.get = &_get_sshort; thisone.set = &_set_sshort; } break; case FFF_UINT: { thisone.get = &_get_uint; thisone.set = &_set_uint; } break; case FFF_INT: { thisone.get = &_get_int; thisone.set = &_set_int; } break; case FFF_ULONG: { thisone.get = &_get_ulong; thisone.set = &_set_ulong; } break; case FFF_LONG: { thisone.get = &_get_long; thisone.set = &_set_long; } break; case FFF_FLOAT: { thisone.get = &_get_float; thisone.set = &_set_float; } break; case FFF_DOUBLE: { thisone.get = &_get_double; thisone.set = &_set_double; } break; default: { thisone.get = NULL; thisone.set = NULL; FFF_ERROR("Unrecognized data type", EINVAL); } break; } return thisone; } /* Check coordinate range and return FFF_NAN if position is out of bounds */ double fff_array_get(const fff_array* thisone, size_t x, size_t y, size_t z, size_t t) { size_t idx; if ((x >= thisone->dimX) || (y >= thisone->dimY) || (z >= thisone->dimZ) || (t >= thisone->dimT)) return FFF_NAN; idx = x*thisone->offsetX + y*thisone->offsetY + z*thisone->offsetZ + t*thisone->offsetT; return thisone->get((const char*)thisone->data, idx); } /* Check coordinate range and do noting position is out of bounds */ void fff_array_set(fff_array* thisone, size_t x, size_t y, size_t z, size_t t, double value) { size_t idx; if ((x >= thisone->dimX) || (y >= thisone->dimY) || (z >= thisone->dimZ) || (t >= thisone->dimT)) return; idx = x*thisone->offsetX + y*thisone->offsetY + z*thisone->offsetZ + t*thisone->offsetT; thisone->set((char*)thisone->data, idx, value); return; } void fff_array_set_all(fff_array* thisone, double val) { fff_array_iterator iter = fff_array_iterator_init(thisone); while (iter.idx < iter.size) { fff_array_set_from_iterator(thisone, iter, val); fff_array_iterator_update(&iter); } return; } fff_array fff_array_get_block(const fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ, size_t t0, size_t t1, size_t fT) { char* data = (char*)thisone->data; data += x0*thisone->byte_offsetX + y0*thisone->byte_offsetY + z0*thisone->byte_offsetZ + t0*thisone->byte_offsetT; return fff_array_view(thisone->datatype, (void*)data, (x1-x0)/fX+1, (y1-y0)/fY+1, (z1-z0)/fZ+1, (t1-t0)/fZ+1, fX*thisone->offsetX, fY*thisone->offsetY, fZ*thisone->offsetZ, fT*thisone->offsetT); } void fff_array_extrema (double* min, double* max, const fff_array* thisone) { double val; fff_array_iterator iter = fff_array_iterator_init(thisone); /* Initialization */ *min = FFF_POSINF; /* 0.0;*/ *max = FFF_NEGINF; /*0.0;*/ while (iter.idx < iter.size) { val = fff_array_get_from_iterator(thisone, iter); if (val < *min) *min = val; else if (val > *max) *max = val; fff_array_iterator_update(&iter); } return; } #define CHECK_DIMS(a1,a2) \ if ((a1->dimX != a2->dimX) || \ (a1->dimY != a2->dimY) || \ (a1->dimZ != a2->dimZ) || \ (a1->dimT != a2->dimT)) \ {FFF_ERROR("Arrays 
have different sizes", EINVAL); return;} \

void fff_array_copy(fff_array* aRes, const fff_array* aSrc)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double valSrc;
  CHECK_DIMS(aRes, aSrc);

  while (itSrc.idx < itSrc.size) {
    valSrc = fff_array_get_from_iterator(aSrc, itSrc);
    fff_array_set_from_iterator(aRes, itRes, valSrc);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

/* Applies an affine correction to the input array so that:
   s0 --> r0
   s1 --> r1 */
void fff_array_compress(fff_array* aRes, const fff_array* aSrc,
                        double r0, double s0,
                        double r1, double s1)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double a, b, valSrc;
  CHECK_DIMS(aRes, aSrc);

  a = (r1-r0) / (s1-s0);
  b = r0 - a*s0;

  while (itSrc.idx < itSrc.size) {
    valSrc = fff_array_get_from_iterator(aSrc, itSrc);
    fff_array_set_from_iterator(aRes, itRes, a*valSrc+b);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

void fff_array_add(fff_array* aRes, const fff_array* aSrc)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double v;
  CHECK_DIMS(aRes, aSrc);

  while (itSrc.idx < itSrc.size) {
    v = fff_array_get_from_iterator(aRes, itRes);
    v += fff_array_get_from_iterator(aSrc, itSrc);
    fff_array_set_from_iterator(aRes, itRes, v);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

void fff_array_sub(fff_array* aRes, const fff_array* aSrc)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double v;
  CHECK_DIMS(aRes, aSrc);

  while (itSrc.idx < itSrc.size) {
    v = fff_array_get_from_iterator(aRes, itRes);
    v -= fff_array_get_from_iterator(aSrc, itSrc);
    fff_array_set_from_iterator(aRes, itRes, v);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

void fff_array_mul(fff_array* aRes, const fff_array* aSrc)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double v;
  CHECK_DIMS(aRes, aSrc);

  while (itSrc.idx < itSrc.size) {
    v = fff_array_get_from_iterator(aRes, itRes);
    v *= fff_array_get_from_iterator(aSrc, itSrc);
    fff_array_set_from_iterator(aRes, itRes, v);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

/* Force denominator's absolute value greater than FFF_TINY. */
void fff_array_div(fff_array* aRes, const fff_array* aSrc)
{
  fff_array_iterator itSrc = fff_array_iterator_init(aSrc);
  fff_array_iterator itRes = fff_array_iterator_init(aRes);
  double v;
  CHECK_DIMS(aRes, aSrc);

  while (itSrc.idx < itSrc.size) {
    v = fff_array_get_from_iterator(aSrc, itSrc);
    /* Clamp the denominator so that its absolute value is at least
       FFF_TINY, preserving its sign */
    if (FFF_ABS(v) < FFF_TINY)
      v = (v < 0) ? -FFF_TINY : FFF_TINY;
    v = fff_array_get_from_iterator(aRes, itRes) / v;
    fff_array_set_from_iterator(aRes, itRes, v);
    fff_array_iterator_update(&itSrc);
    fff_array_iterator_update(&itRes);
  }

  return;
}

fff_array_iterator fff_array_iterator_init_skip_axis(const fff_array* im, int axis)
{
  fff_array_iterator iter;
  size_t pY, pZ, pT;

  /* Total number of elements to iterate over */
  iter.idx = 0;
  iter.size = im->dimX*im->dimY*im->dimZ*im->dimT;

  /* Initialize pointer and coordinates */
  iter.data = (char*)im->data;
  iter.x = 0;
  iter.y = 0;
  iter.z = 0;
  iter.t = 0;

  /* Boundary check parameters */
  iter.ddimY = im->dimY - 1;
  iter.ddimZ = im->dimZ - 1;
  iter.ddimT = im->dimT - 1;
  if (axis == 3) {
    iter.ddimT = 0;
    iter.size /= im->dimT;
  }
  else if (axis == 2) {
    iter.ddimZ = 0;
    iter.size /= im->dimZ;
  }
  else if (axis == 1) {
    iter.ddimY = 0;
    iter.size /= im->dimY;
  }
  else if (axis == 0)
    iter.size /= im->dimX;

  /* Increments */
  pY = iter.ddimY * im->byte_offsetY;
  pZ = iter.ddimZ * im->byte_offsetZ;
  pT = iter.ddimT * im->byte_offsetT;
  iter.incT = im->byte_offsetT;
  iter.incZ = im->byte_offsetZ - pT;
  iter.incY = im->byte_offsetY - pZ - pT;
  iter.incX = im->byte_offsetX - pY - pZ - pT;

  /* Update function */
  switch(im->ndims) {
  case FFF_ARRAY_1D:
    iter.update = &_fff_array_iterator_update1d;
    break;
  case FFF_ARRAY_2D:
    iter.update = &_fff_array_iterator_update2d;
    break;
  case FFF_ARRAY_3D:
    iter.update = &_fff_array_iterator_update3d;
    break;
  case FFF_ARRAY_4D:
  default:
    iter.update = &_fff_array_iterator_update4d;
    break;
  }

  return iter;
}

fff_array_iterator fff_array_iterator_init(const fff_array* im)
{
  return fff_array_iterator_init_skip_axis(im, -1);
}

static void _fff_array_iterator_update1d(void* it)
{
  fff_array_iterator* iter = (fff_array_iterator*)it;

  iter->idx ++;
  iter->data += iter->incX;
  iter->x = iter->idx;
  return;
}

static void _fff_array_iterator_update2d(void* it)
{
  fff_array_iterator* iter = (fff_array_iterator*)it;

  iter->idx ++;
  if (iter->y < iter->ddimY) {
    iter->y ++;
    iter->data += iter->incY;
    return;
  }
  iter->y = 0;
  iter->x ++;
  iter->data += iter->incX;
  return;
}

static void _fff_array_iterator_update3d(void* it)
{
  fff_array_iterator* iter = (fff_array_iterator*)it;

  iter->idx ++;
  if (iter->z < iter->ddimZ) {
    iter->z ++;
    iter->data += iter->incZ;
    return;
  }
  if (iter->y < iter->ddimY) {
    iter->z = 0;
    iter->y ++;
    iter->data += iter->incY;
    return;
  }
  iter->z = 0;
  iter->y = 0;
  iter->x ++;
  iter->data += iter->incX;
  return;
}

static void _fff_array_iterator_update4d(void* it)
{
  fff_array_iterator* iter = (fff_array_iterator*)it;

  iter->idx ++;
  if (iter->t < iter->ddimT) {
    iter->t ++;
    iter->data += iter->incT;
    return;
  }
  if (iter->z < iter->ddimZ) {
    iter->t = 0;
    iter->z ++;
    iter->data += iter->incZ;
    return;
  }
  if (iter->y < iter->ddimY) {
    iter->t = 0;
    iter->z = 0;
    iter->y ++;
    iter->data += iter->incY;
    return;
  }
  iter->t = 0;
  iter->z = 0;
  iter->y = 0;
  iter->x ++;
  iter->data += iter->incX;
  return;
}

/* Image must be in DOUBLE format */
void fff_array_iterate_vector_function(fff_array* im, int axis,
                                       void(*func)(fff_vector*, void*),
                                       void* par)
{
  fff_array_iterator iter;
  fff_vector x;

  if (im->datatype != FFF_DOUBLE) {
    FFF_WARNING("Image type must be double.");
    return;
  }
  if ((axis>3) || (axis<0)) {
    FFF_WARNING("Invalid axis.");
    return;
  }

  x.size = fff_array_dim(im, axis);
  x.stride = fff_array_offset(im, axis);
  x.owner = 0;

  iter = fff_array_iterator_init_skip_axis(im, axis);
  while (iter.idx < iter.size) {
    x.data = (double*)iter.data;
    (*func)(&x, par);
    fff_array_iterator_update(&iter);
  }

  return;
}

/* Convert image values to [0,clamp-1]; typically clamp = 256. Possibly
   modify the dynamic range if the input value is overestimated.
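   Concretely (an illustrative sketch of the mapping implemented below):
   with clamp = 256, input values in [tth, imax] are sent affinely to
   [0, 255] via v' = 255 * (v - tth) / (imax - tth), where
   tth = max(th, imin) is the effective threshold.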
   For instance, the reconstructed MRI signal is generally encoded in 12
   bits (values ranging from 0 to 4095). Therefore, this operation may
   result in a loss of information. */
void fff_array_clamp(fff_array* aRes, const fff_array* aSrc, double th, int* clamp)
{
  double imin, imax, tth;
  int dmax = *clamp - 1;

  /* Compute input image min and max */
  fff_array_extrema(&imin, &imax, aSrc);

  /* Make sure the threshold is not below the min intensity */
  tth = FFF_MAX(th, imin);

  /* Test */
  if (tth>imax) {
    FFF_WARNING("Inconsistent threshold, ignored.");
    tth = imin;
  }

  /* If the image dynamic is small, no need for compression: just
     downshift image values and re-estimate the dynamic range (hence
     imax is translated to imax-tth casted to SSHORT) */
  if ((fff_is_integer(aSrc->datatype)) && ((imax-tth)<=dmax)) {
    fff_array_compress(aRes, aSrc, 0, tth, 1, tth+1);
    *clamp = (int)(imax-tth) + 1;
  }

  /* Otherwise, compress after downshifting image values (values equal
     to the threshold are reset to zero) */
  else
    fff_array_compress(aRes, aSrc, 0, tth, dmax, imax);

  return;
}

/*************************************************************************
   Manually templated array accessors
*************************************************************************/

static double _get_uchar(const char* data, size_t pos)
{
  unsigned char* buf = (unsigned char*)data;
  return((double)buf[pos]);
}

static double _get_schar(const char* data, size_t pos)
{
  signed char* buf = (signed char*)data;
  return((double)buf[pos]);
}

static double _get_ushort(const char* data, size_t pos)
{
  unsigned short* buf = (unsigned short*)data;
  return((double)buf[pos]);
}

static double _get_sshort(const char* data, size_t pos)
{
  signed short* buf = (signed short*)data;
  return((double)buf[pos]);
}

static double _get_uint(const char* data, size_t pos)
{
  unsigned int* buf = (unsigned int*)data;
  return((double)buf[pos]);
}

static double _get_int(const char* data, size_t pos)
{
  int* buf = (int*)data;
  return((double)buf[pos]);
}

static double _get_ulong(const char* data, size_t pos)
{
  unsigned long int* buf = (unsigned long int*)data;
  return((double)buf[pos]);
}

static double _get_long(const char* data, size_t pos)
{
  long int* buf = (long int*)data;
  return((double)buf[pos]);
}

static double _get_float(const char* data, size_t pos)
{
  float* buf = (float*)data;
  return((double)buf[pos]);
}

static double _get_double(const char* data, size_t pos)
{
  double* buf = (double*)data;
  return(buf[pos]);
}

static void _set_uchar(char* data, size_t pos, double value)
{
  unsigned char* buf = (unsigned char*)data;
  buf[pos] = (unsigned char)(FFF_ROUND(value));
  return;
}

static void _set_schar(char* data, size_t pos, double value)
{
  signed char* buf = (signed char*)data;
  buf[pos] = (signed char)(FFF_ROUND(value));
  return;
}

static void _set_ushort(char* data, size_t pos, double value)
{
  unsigned short* buf = (unsigned short*)data;
  buf[pos] = (unsigned short)(FFF_ROUND(value));
  return;
}

static void _set_sshort(char* data, size_t pos, double value)
{
  signed short* buf = (signed short*)data;
  buf[pos] = (signed short)(FFF_ROUND(value));
  return;
}

static void _set_uint(char* data, size_t pos, double value)
{
  unsigned int* buf = (unsigned int*)data;
  buf[pos] = (unsigned int)(FFF_ROUND(value));
  return;
}

static void _set_int(char* data, size_t pos, double value)
{
  int* buf = (int*)data;
  buf[pos] = (int)(FFF_ROUND(value));
  return;
}

static void _set_ulong(char* data, size_t pos, double value)
{
  unsigned long int* buf = (unsigned long int*)data;
  buf[pos] = (unsigned long int)(FFF_ROUND(value));
return; } static void _set_long(char* data, size_t pos, double value) { long int* buf = (long int*)data; buf[pos] = (long int)(FFF_ROUND(value)); return; } static void _set_float(char* data, size_t pos, double value) { float* buf = (float*)data; buf[pos] = (float)value; return; } static void _set_double(char* data, size_t pos, double value) { double* buf = (double*)data; buf[pos] = value; return; } nipy-0.6.1/lib/fff/fff_array.h000066400000000000000000000235531470056100100161160ustar00rootroot00000000000000/*! \file fff_array.h \brief Basic image object \author Alexis Roche \date 2005-2006 This library implements a generic 4-dimensional array object that can be used to represent images. */ #ifndef FFF_ARRAY #define FFF_ARRAY #ifdef __cplusplus extern "C" { #endif #include "fff_base.h" #include "fff_vector.h" #include #define fff_array_dim(array, axis) \ ((axis)==0 ? (array->dimX) : ((axis)==1 ? (array->dimY) : ((axis)==2 ? (array->dimZ) : (array->dimT)) ) ) #define fff_array_offset(array, axis) \ ((axis)==0 ? (array->offsetX) : ((axis)==1 ? (array->offsetY) : ((axis)==2 ? (array->offsetZ) : (array->offsetT)) ) ) /* #define fff_array_copy(ares, asrc) \ fff_array_compress(ares, asrc, 0, 0, 1, 1) */ #define fff_array_new1d(dtype, dx) \ fff_array_new(dtype, dx, 1, 1, 1) #define fff_array_new2d(dtype, dx, dy) \ fff_array_new(dtype, dx, dy, 1, 1) #define fff_array_new3d(dtype, dx, dy, dz) \ fff_array_new(dtype, dx, dy, dz, 1) #define fff_array_view1d(dtype, data, dx, ox) \ fff_array_view(dtype, data, dx, 1, 1, 1, ox, 1, 1, 1) #define fff_array_view2d(dtype, data, dx, dy, ox, oy) \ fff_array_view(dtype, data, dx, dy, 1, 1, ox, oy, 1, 1) #define fff_array_view3d(dtype, data, dx, dy, dz, ox, oy, oz) \ fff_array_view(dtype, data, dx, dy, dz, 1, ox, oy, oz, 1) #define fff_array_get1d(array, x) \ fff_array_get(array, x, 0, 0, 0) #define fff_array_get2d(array, x, y) \ fff_array_get(array, x, y, 0, 0) #define fff_array_get3d(array, x, y) \ fff_array_get(array, x, y, z, 0) #define fff_array_set1d(array, x, a) \ fff_array_set(array, x, 0, 0, 0, a) #define fff_array_set2d(array, x, y, a) \ fff_array_set(array, x, y, 0, 0, a) #define fff_array_set3d(array, x, y, z, a) \ fff_array_set(array, x, y, z, 0, a) #define fff_array_get_block1d(array, x0, x1, fx) \ fff_array_get_block(array, x0, x1, fx, 0, 0, 1, 0, 0, 1, 0, 0, 1) #define fff_array_get_block2d(array, x0, x1, fx, y0, y1, fy) \ fff_array_get_block(array, x0, x1, fx, y0, y1, fy, 0, 0, 1, 0, 0, 1) #define fff_array_get_block3d(array, x0, x1, fx, y0, y1, fy, z0, z1, fz) \ fff_array_get_block(array, x0, x1, fx, y0, y1, fy, z0, z1, fz, 0, 0, 1) #define fff_array_get_from_iterator(array, iter) \ array->get(iter.data, 0) #define fff_array_set_from_iterator(array, iter, val) \ array->set(iter.data, 0, val) #define fff_array_iterator_update(iter) \ (iter)->update(iter) /*! \typedef fff_array_ndims \brief Image flag type */ typedef enum { FFF_ARRAY_1D = 1, /*!< 1d image */ FFF_ARRAY_2D = 2, /*!< 2d image */ FFF_ARRAY_3D = 3, /*!< 3d image */ FFF_ARRAY_4D = 4 /*!< 4d image */ } fff_array_ndims; /*! \struct fff_array \brief The fff image structure Image values are stored in a \c void linear array, the actual encoding type being specified by the field \c datatype. The image dimension along each axis are encoded by fields starting with \c dim, while the \c ndims flag specifies the biggest axis index corresponding to a non-unitary dimension; it essentially defines whether the image is 1d, 2d, 3d, or 4d. 
The use of offsets (or strides) makes the object independent from any storage convention. A pixel with coordinates (\a x, \a y, \a z, \a t) may be accessed using a command like: \code value = im->data[ x*im->offsetX + y*im->offsetY + z*im->offsetZ + t*im->offsetT ]; \endcode Note that this approach makes it possible to extract a sub-image from an original image without the need to reallocate memory. */ typedef struct { fff_array_ndims ndims; /*!< Image flag */ fff_datatype datatype; /*!< Image encoding type */ size_t dimX; /*!< Dimension (number of pixels) along first axis */ size_t dimY; /*!< Dimension (number of pixels) along second axis */ size_t dimZ; /*!< Dimension (number of pixels) along third axis */ size_t dimT; /*!< Dimension (number of pixels) along fourth axis */ size_t offsetX; /*!< Offset (relative to type) along first axis */ size_t offsetY; /*!< Offset (relative to type) along second axis */ size_t offsetZ; /*!< Offset (relative to type) along third axis */ size_t offsetT; /*!< Offset (relative to type) along fourth axis */ size_t byte_offsetX; /*!< Offset (in bytes) along first axis */ size_t byte_offsetY; /*!< Offset (in bytes) along second axis */ size_t byte_offsetZ; /*!< Offset (in bytes) along third axis */ size_t byte_offsetT; /*!< Offset (in bytes) along fourth axis */ void* data; /*!< Image buffer */ int owner; /*!< Non-zero if the object owns its data */ double (*get)(const char*, size_t); /*!< Get accessor */ void (*set)(char*, size_t, double); /*!< Set accessor */ } fff_array; /*! \struct fff_array_iterator \brief Image iterator structure */ typedef struct { size_t idx; size_t size; char* data; size_t x; size_t y; size_t z; size_t t; size_t ddimY; size_t ddimZ; size_t ddimT; size_t incX; size_t incY; size_t incZ; size_t incT; void (*update)(void*); /*!< Updater */ } fff_array_iterator; /*! \brief Constructor for the fff_array structure \param datatype image encoding type \param dimX number of pixels along the first axis \param dimY number of pixels along the second axis \param dimZ number of pixels along the third axis \param dimT number of pixels along the fourth axis This function allocates a new image buffer. */ extern fff_array* fff_array_new(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ, size_t dimT); /*! \brief Destructor for the \c fff_array structure \param thisone fff_array member to be deleted */ extern void fff_array_delete(fff_array* thisone); /*! \brief Array view \param datatype image encoding type \param buf already allocated image buffer \param dimX number of pixels along the first axis \param dimY number of pixels along the second axis \param dimZ number of pixels along the third axis \param dimT number of pixels along the fourth axis \param offX offset along the first axis \param offY offset along the second axis \param offZ offset along the third axis \param offT offset along the fourth axis This function assumes that the image buffer is already allocated. */ extern fff_array fff_array_view(fff_datatype datatype, void* buf, size_t dimX, size_t dimY, size_t dimZ, size_t dimT, size_t offX, size_t offY, size_t offZ, size_t offT); /*! \brief Generic function to access a voxel's value \param thisone input image \param x first coordinate \param y second coordinate \param z third coordinate \param t fourth coordinate Get image value at a specific location defined by voxel coordinates. Return \c fff_NAN if the position is out of bounds. */ extern double fff_array_get(const fff_array* thisone, size_t x, size_t y, size_t z, size_t t); /*! 
\brief Generic function to set one voxel's value \param value value to set \param thisone input image \param x first coordinate \param y second coordinate \param z third coordinate \param t fourth coordinate */ extern void fff_array_set(fff_array* thisone, size_t x, size_t y, size_t z, size_t t, double value); /*! \brief Set all pixel values to a given constant \param thisone image \param c constant */ extern void fff_array_set_all(fff_array* thisone, double c); /*! \brief Extract an image block \param thisone input image \param x0 first coordinate of the starting point \param x1 first coordinate of the finishing point \param y0 second coordinate of the starting point \param y1 second coordinate of the finishing point \param z0 third coordinate of the starting point \param z1 third coordinate of the finishing point \param t0 fourth coordinate of the starting point \param t1 fourth coordinate of the finishing point \param fX subsampling factor in the first direction \param fY subsampling factor in the second direction \param fZ subsampling factor in the third direction \param fT subsampling factor in the fourth direction */ extern fff_array fff_array_get_block(const fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ, size_t t0, size_t t1, size_t fT); extern void fff_array_extrema (double* min, double* max, const fff_array* thisone); extern void fff_array_copy(fff_array* ares, const fff_array* asrc); extern void fff_array_compress(fff_array* ares, const fff_array* asrc, double r0, double s0, double r1, double s1); extern void fff_array_add (fff_array * x, const fff_array * y); extern void fff_array_sub (fff_array * x, const fff_array * y); extern void fff_array_div (fff_array * x, const fff_array * y); extern void fff_array_mul (fff_array * x, const fff_array * y); /* Convert image values to [0,clamp-1]; typically clamp = 256. Possibly modify the dynamic range if the input value is overestimated. For instance, the reconstructed MRI signal is generally encoded in 12 bits (values ranging from 0 to 4095). Therefore, this operation may result in a loss of information. 
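  A minimal usage sketch (illustrative only; \a dx, \a dy, \a dz and the
  input array \a src are assumed to be defined elsewhere):

  \code
  int clamp = 256;
  fff_array* res = fff_array_new3d(FFF_SSHORT, dx, dy, dz);
  fff_array_clamp(res, src, 0.0, &clamp);
  /* on return, clamp may have been reduced to the actual number of
     grey levels when the input dynamic range was already small */
  fff_array_delete(res);
  \endcode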
*/ extern void fff_array_clamp(fff_array* ares, const fff_array* asrc, double th, int* clamp); extern fff_array_iterator fff_array_iterator_init(const fff_array* array); extern fff_array_iterator fff_array_iterator_init_skip_axis(const fff_array* array, int axis); /* extern void fff_array_iterator_update(fff_array_iterator* thisone); */ extern void fff_array_iterate_vector_function(fff_array* array, int axis, void(*func)(fff_vector*, void*), void* par); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_base.c000066400000000000000000000042571470056100100157050ustar00rootroot00000000000000#include "fff_base.h" unsigned int fff_nbytes(fff_datatype type) { unsigned int nbytes; switch(type) { case FFF_UCHAR: nbytes = (unsigned int)sizeof(unsigned char); break; case FFF_SCHAR: nbytes = (unsigned int)sizeof(signed char); break; case FFF_USHORT: nbytes = (unsigned int)sizeof(unsigned short); break; case FFF_SSHORT: nbytes = (unsigned int)sizeof(signed short); break; case FFF_UINT: nbytes = (unsigned int)sizeof(unsigned int); break; case FFF_INT: nbytes = (unsigned int)sizeof(int); break; case FFF_ULONG: nbytes = (unsigned int)sizeof(unsigned long); break; case FFF_LONG: nbytes = (unsigned int)sizeof(long); break; case FFF_FLOAT: nbytes = (unsigned int)sizeof(float); break; case FFF_DOUBLE: nbytes = (unsigned int)sizeof(double); break; default: nbytes = 0; break; } return nbytes; } int fff_is_integer(fff_datatype type) { int ok = 0; switch (type) { default: break; case FFF_UCHAR: case FFF_SCHAR: case FFF_USHORT: case FFF_SSHORT: case FFF_UINT: case FFF_INT: case FFF_ULONG: case FFF_LONG: ok = 1; break; } return ok; } fff_datatype fff_get_datatype( unsigned int sizeType, unsigned int integerType, unsigned int signedType ) { fff_datatype type = FFF_UNKNOWN_TYPE; /* Case: integer type */ if ( integerType ) { if ( signedType ) { if ( sizeType == sizeof(signed char) ) type = FFF_SCHAR; else if ( sizeType == sizeof(signed short) ) type = FFF_SSHORT; else if ( sizeType == sizeof(int) ) type = FFF_INT; else if ( sizeType == sizeof(signed long int) ) type = FFF_LONG; } else { if ( sizeType == sizeof(unsigned char) ) type = FFF_UCHAR; else if ( sizeType == sizeof(unsigned short) ) type = FFF_USHORT; else if ( sizeType == sizeof(unsigned int) ) type = FFF_UINT; else if ( sizeType == sizeof(unsigned long int) ) type = FFF_ULONG; } } /* Case: floating type */ else { if ( sizeType == sizeof(float) ) type = FFF_FLOAT; else if ( sizeType == sizeof(double) ) type = FFF_DOUBLE; } return type; } nipy-0.6.1/lib/fff/fff_base.h000066400000000000000000000107641470056100100157120ustar00rootroot00000000000000/*! \file fff_base.h \brief Basic fff macros and error handling functions \author Alexis Roche \date 2003-2008 */ #ifndef FFF_BASE #define FFF_BASE #ifdef __cplusplus extern "C" { #endif #include #include #ifdef INFINITY #define FFF_POSINF INFINITY #define FFF_NEGINF (-INFINITY) #else #define FFF_POSINF HUGE_VAL #define FFF_NEGINF (-HUGE_VAL) #endif #ifdef NAN #define FFF_NAN NAN #else #define FFF_NAN (FFF_POSINF/FFF_POSINF) #endif #ifdef NO_APPEND_FORTRAN # define FFF_FNAME(x) x #else # define FFF_FNAME(x) x##_ #endif /*! Displays an error message with associated error code. */ #define FFF_ERROR(message, errcode) \ { \ fprintf(stderr, "Unhandled error: %s (errcode %i)\n", message, errcode); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Displays a warning message. 
*/ #define FFF_WARNING(message) \ { \ fprintf(stderr, "Warning: %s\n", message); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Displays a debug message. */ #define FFF_DEBUG(message) \ { \ fprintf(stderr, "DEBUG: %s\n", message); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Rounds \a a to the nearest smaller integer \bug Compilator-dependent? */ #define FFF_FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ? (int)(a)-1 : (int)(a))) /*! Rounds \a a to the nearest integer (either smaller or bigger) */ #define FFF_ROUND(a)(FFF_FLOOR(a+0.5)) /*! Rounds \a a to the nearest bigger integer */ #define FFF_CEIL(a)(-(FFF_FLOOR(-(a)))) /*! Rounds \a a to the nearest smaller integer, assuming \a a is non-negative \bug Compilator-dependent? */ #define FFF_UNSIGNED_FLOOR(a) ( (int)(a) ) /*! Rounds \a a to the nearest integer, assuming \a a is non-negative */ #define FFF_UNSIGNED_ROUND(a) ( (int)(a+0.5) ) /*! Rounds \a a to the nearest bigger integer, assuming \a a is non-negative */ #define FFF_UNSIGNED_CEIL(a) ( ( (int)(a)-a )!=0.0 ? (int)(a+1) : (int)(a) ) /*! Returns 1 if \a a is positive, -1 if \a a is negative, 0 if \a a equals zero Note that this macro differs from \a GSL_SIGN which returns +1 if \a a==0 */ #define FFF_SIGN(a)( (a)>0.0 ? 1 : ( (a)<0.0 ? -1 : 0 ) ) /*! Computes the absolute value of \a a */ #define FFF_ABS(a) ( (a) > 0.0 ? (a) : (-(a)) ) /*! Computes \f$ a^2 \f$ */ #define FFF_SQR(a) ( (a)*(a) ) /*! Computes \f$ a^3 \f$ */ #define FFF_CUBE(a) ( (a)*(a)*(a) ) /*! Computes \f$ a modulo, b ie the remainder after division of a by b \f$ */ #define FFF_REM(a, b) ( (int)(a)%(int)(b) ) /*! Computes the minimum of \a a and \a b */ #define FFF_MIN(a,b) ( (a) < (b) ? (a) : (b) ) /*! Computes the maximum of \a a and \a b */ #define FFF_MAX(a,b) ( (a) > (b) ? (a) : (b) ) /*! Low threshold a value to avoid vanishing */ #define FFF_TINY 1e-50 #define FFF_ENSURE_POSITIVE(a) ( (a) > FFF_TINY ? (a) : FFF_TINY ) #define FFF_IS_ODD(n) ((n) & 1) /*! \typedef fff_datatype \brief Data encoding types */ typedef enum { FFF_UNKNOWN_TYPE = -1, /*!< unknown type */ FFF_UCHAR = 0, /*!< unsigned char */ FFF_SCHAR = 1, /*!< signed char */ FFF_USHORT = 2, /*!< unsigned short */ FFF_SSHORT = 3, /*!< signed short */ FFF_UINT = 4, /*!< unsigned int */ FFF_INT = 5, /*!< (signed) int */ FFF_ULONG = 6, /*!< unsigned long int */ FFF_LONG = 7, /*!< (signed) long int */ FFF_FLOAT = 8, /*!< float */ FFF_DOUBLE = 9 /*!< double */ } fff_datatype; /*! \brief Return the byte length of a given data type \param type input data type */ extern unsigned int fff_nbytes(fff_datatype type); /*! \brief Return 1 if data type is integer, 0 otherwise \param type input data type */ extern int fff_is_integer(fff_datatype type); /*! 
\brief Return the data type that matches given features \param sizeType size in bytes \param integerType if zero, a floating-point type (\c float or \c double) is assumed \param signedType for integer types, tells whether the type is signed or not */ extern fff_datatype fff_get_datatype( unsigned int sizeType, unsigned int integerType, unsigned int signedType ); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_blas.c000066400000000000000000000430741470056100100157140ustar00rootroot00000000000000#include "fff_base.h" #include "fff_blas.h" #include #define FNAME FFF_FNAME /* TODO : add tests for dimension compatibility */ /* We have to account for the fact that BLAS assumes column-major ordered matrices by transposing */ #define DIAG(Diag) ( (Diag)==(CblasUnit) ? "U" : "N" ) #define TRANS(Trans) ( (Trans)==(CblasNoTrans) ? "N" : "T" ) #define SWAP_TRANS(Trans) ( (Trans)==(CblasNoTrans) ? "T" : "N" ) #define SWAP_UPLO(Uplo) ( (Uplo)==(CblasUpper) ? "L" : "U" ) #define SWAP_SIDE(Side) ( (Side)==(CblasRight) ? "L" : "R" ) /* BLAS 1 */ extern double FNAME(ddot)(int* n, double* dx, int* incx, double* dy, int* incy); extern double FNAME(dnrm2)(int* n, double* x, int* incx); extern double FNAME(dasum)(int* n, double* dx, int* incx); extern int FNAME(idamax)(int* n, double* dx, int* incx); extern int FNAME(dswap)(int* n, double* dx, int* incx, double* dy, int* incy); extern int FNAME(dcopy)(int* n, double* dx, int* incx, double* dy, int* incy); extern int FNAME(daxpy)(int* n, double* da, double* dx, int* incx, double* dy, int* incy); extern int FNAME(dscal)(int* n, double* da, double* dx, int* incx); extern int FNAME(drotg)(double* da, double* db, double* c__, double* s); extern int FNAME(drot)(int* n, double* dx, int* incx, double* dy, int* incy, double* c__, double* s); extern int FNAME(drotmg)(double* dd1, double* dd2, double* dx1, double* dy1, double* dparam); extern int FNAME(drotm)(int* n, double* dx, int* incx, double* dy, int* incy, double* dparam); /* BLAS 2 */ extern int FNAME(dgemv)(char *trans, int* m, int* n, double* alpha, double* a, int* lda, double* x, int* incx, double* beta, double* y, int* incy); extern int FNAME(dtrmv)(char *uplo, char *trans, char *diag, int* n, double* a, int* lda, double* x, int* incx); extern int FNAME(dtrsv)(char *uplo, char *trans, char *diag, int* n, double* a, int* lda, double* x, int* incx); extern int FNAME(dsymv)(char *uplo, int* n, double* alpha, double* a, int* lda, double* x, int* incx, double *beta, double* y, int* incy); extern int FNAME(dger)(int* m, int* n, double* alpha, double* x, int* incx, double* y, int* incy, double* a, int* lda); extern int FNAME(dsyr)(char *uplo, int* n, double* alpha, double* x, int* incx, double* a, int* lda); extern int FNAME(dsyr2)(char *uplo, int* n, double* alpha, double* x, int* incx, double* y, int* incy, double* a, int* lda); /* BLAS 3 */ extern int FNAME(dgemm)(char *transa, char *transb, int* m, int* n, int* k, double* alpha, double* a, int* lda, double* b, int* ldb, double* beta, double* c__, int* ldc); extern int FNAME(dsymm)(char *side, char *uplo, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb, double* beta, double* c__, int* ldc); extern int FNAME(dtrmm)(char *side, char *uplo, char *transa, char *diag, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb); extern int FNAME(dtrsm)(char *side, char *uplo, char *transa, char *diag, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb); extern int FNAME(dsyrk)(char *uplo, char *trans, 
int* n, int* k, double* alpha, double* a, int* lda,
                        double* beta, double* c__, int* ldc);
extern int FNAME(dsyr2k)(char *uplo, char *trans, int* n, int* k,
                         double* alpha, double* a, int* lda,
                         double* b, int* ldb, double* beta,
                         double* c__, int* ldc);


/****** BLAS 1 ******/

/* Compute the scalar product x^T y for the vectors x and y, and return
   the result. */
double fff_blas_ddot (const fff_vector * x, const fff_vector * y)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  int incy = (int) y->stride;
  if ( n != y->size ) return 1;
  return( FNAME(ddot)(&n, x->data, &incx, y->data, &incy) );
}

/* Compute the Euclidean norm ||x||_2 = \sqrt {\sum x_i^2} of the vector x. */
double fff_blas_dnrm2 (const fff_vector * x)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  return( FNAME(dnrm2)(&n, x->data, &incx) );
}

/* Compute the absolute sum \sum |x_i| of the elements of the vector x. */
double fff_blas_dasum (const fff_vector * x)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  return( FNAME(dasum)(&n, x->data, &incx) );
}

/* Return the index of the largest element of the vector x. The largest
   element is determined by its absolute magnitude. We subtract one from
   the index returned by the Fortran routine to obtain an actual C index. */
CBLAS_INDEX_t fff_blas_idamax (const fff_vector * x)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  return( (CBLAS_INDEX_t)(FNAME(idamax)(&n, x->data, &incx) - 1) );
}

/* Exchange the elements of the vectors x and y. */
int fff_blas_dswap (fff_vector * x, fff_vector * y)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  int incy = (int) y->stride;
  if ( n != y->size ) return 1;
  return( FNAME(dswap)(&n, x->data, &incx, y->data, &incy) );
}

/* Copy the elements of the vector x into the vector y */
int fff_blas_dcopy (const fff_vector * x, fff_vector * y)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  int incy = (int) y->stride;
  if ( n != y->size ) return 1;
  return( FNAME(dcopy)(&n, x->data, &incx, y->data, &incy) );
}

/* Compute the sum y = \alpha x + y for the vectors x and y */
int fff_blas_daxpy (double alpha, const fff_vector * x, fff_vector * y)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  int incy = (int) y->stride;
  if ( n != y->size ) return 1;
  return( FNAME(daxpy)(&n, &alpha, x->data, &incx, y->data, &incy) );
}

/* Rescale the vector x by the multiplicative factor alpha. */
int fff_blas_dscal (double alpha, fff_vector * x)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  return( FNAME(dscal)(&n, &alpha, x->data, &incx) );
}

/* Compute a Givens rotation (c,s) which zeroes the vector (a,b),

     [  c  s ] [ a ] = [ r ]
     [ -s  c ] [ b ]   [ 0 ]

   The variables a and b are overwritten by the routine. */
int fff_blas_drotg (double a[], double b[], double c[], double s[])
{
  return( FNAME(drotg)(a, b, c, s) );
}

/* Apply a Givens rotation (x', y') = (c x + s y, -s x + c y) to the
   vectors x, y. */
int fff_blas_drot (fff_vector * x, fff_vector * y, double c, double s)
{
  int n = (int) x->size;
  int incx = (int) x->stride;
  int incy = (int) y->stride;
  if ( n != y->size ) return 1;
  return( FNAME(drot)(&n, x->data, &incx, y->data, &incy, &c, &s) );
}

/* Compute a modified Givens transformation. The modified Givens
   transformation is defined in the original Level-1 blas specification.
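   In the standard BLAS convention used here, P is the 5-element dparam
   array: P[0] is a flag selecting the form of the 2x2 transformation
   matrix H, and P[1..4] hold its elements (h11, h21, h12, h22), some of
   which are implicit depending on the flag. The same layout is expected
   by fff_blas_drotm below.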
*/ int fff_blas_drotmg (double d1[], double d2[], double b1[], double b2, double P[]) { return( FNAME(drotmg)(d1, d2, b1, &b2, P) ); } /* Apply a modified Givens transformation.*/ int fff_blas_drotm (fff_vector * x, fff_vector * y, const double P[]) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(drotm)(&n, x->data, &incx, y->data, &incy, (double*)P) ); } /****** BLAS 2 ******/ /* Compute the matrix-vector product and sum y = \alpha op(A) x + \beta y, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. */ int fff_blas_dgemv (CBLAS_TRANSPOSE_t TransA, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y) { char* trans = SWAP_TRANS(TransA); int incx = (int) x->stride; int incy = (int) y->stride; int m = (int) A->size2; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dgemv)(trans, &m, &n, &alpha, A->data, &lda, x->data, &incx, &beta, y->data, &incy) ); } /* Compute the matrix-vector product x = op(A) x for the triangular matrix A, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of the matrix is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced.*/ int fff_blas_dtrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(TransA); char* diag = DIAG(Diag); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dtrmv)(uplo, trans, diag, &n, A->data, &lda, x->data, &incx) ); } /* Compute inv(op(A)) x for x, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of the matrix is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(TransA); char* diag = DIAG(Diag); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dtrsv)(uplo, trans, diag, &n, A->data, &lda, x->data, &incx) ); } /* Compute the matrix-vector product and sum y = \alpha A x + \beta y for the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. 
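   A minimal usage sketch (illustrative only; A, x and y are assumed to
   be preallocated fff objects with matching sizes):

     fff_blas_dsymv(CblasUpper, 1.0, A, x, 0.0, y);   (computes y = A x)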
*/ int fff_blas_dsymv (CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int incy = (int) y->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsymv)(uplo, &n, &alpha, A->data, &lda, x->data, &incx, &beta, y->data, &incy) ); } /* Compute the rank-1 update A = \alpha x y^T + A of the matrix A.*/ int fff_blas_dger (double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A) { int incx = (int) x->stride; int incy = (int) y->stride; int m = (int) A->size2; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dger)(&m, &n, &alpha, y->data, &incy, x->data, &incx, A->data, &lda) ); } /* Compute the symmetric rank-1 update A = \alpha x x^T + A of the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. */ int fff_blas_dsyr (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, fff_matrix * A) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsyr)(uplo, &n, &alpha, x->data, &incx, A->data, &lda ) ); } /* These functions compute the symmetric rank-2 update A = \alpha x y^T + \alpha y x^T + A of the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. */ int fff_blas_dsyr2 (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int incy = (int) y->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsyr2)(uplo, &n, &alpha, y->data, &incy, x->data, &incx, A->data, &lda) ); } /****** BLAS 3 ******/ /* Compute the matrix-matrix product and sum C = \alpha op(A) op(B) + \beta C where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans and similarly for the parameter TransB. */ int fff_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { /* We have A and B in C convention, hence At and Bt in F convention. By computing Bt*At in F convention, we get A*B in C convention. Hence, m is the number of rows of Bt and Ct (number of cols of B and C) n is the number of cols of At and Ct (number of rows of A and C) k is the number of cols of Bt and rows of At (number of rows of B and cols of A) */ char* transa = TRANS(TransA); char* transb = TRANS(TransB); int m = C->size2; int n = C->size1; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; int k = (TransB == CblasNoTrans) ? (int)B->size1 : (int)B->size2; return( FNAME(dgemm)(transb, transa, &m, &n, &k, &alpha, B->data, &ldb, A->data, &lda, &beta, C->data, &ldc) ); } /* Compute the matrix-matrix product and sum C = \alpha A B + \beta C for Side is CblasLeft and C = \alpha B A + \beta C for Side is CblasRight, where the matrix A is symmetric. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. 
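   Implementation note: the underlying Fortran BLAS assumes column-major
   storage whereas fff matrices are row-major, so the wrapper works on
   the transposed problem; this is why Side and Uplo are swapped (see the
   SWAP_SIDE and SWAP_UPLO macros at the top of this file) and why m and
   n are read from C->size2 and C->size1 respectively.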
*/ int fff_blas_dsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); int m = C->size2; int n = C->size1; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; return ( FNAME(dsymm)(side, uplo, &m, &n, &alpha, A->data, &lda, B->data, &ldb, &beta, C->data, &ldc) ); } /* Compute the matrix-matrix product B = \alpha op(A) B for Side is CblasLeft and B = \alpha B op(A) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); char* transa = TRANS(TransA); char* diag = DIAG(Diag); int m = B->size2; int n = B->size1; int lda = (int) A->tda; int ldb = (int) B->tda; return( FNAME(dtrmm)(side, uplo, transa, diag, &m, &n, &alpha, A->data, &lda, B->data, &ldb) ); } /* Compute the inverse-matrix matrix product B = \alpha op(inv(A))B for Side is CblasLeft and B = \alpha B op(inv(A)) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); char* transa = TRANS(TransA); char* diag = DIAG(Diag); int m = B->size2; int n = B->size1; int lda = (int) A->tda; int ldb = (int) B->tda; return( FNAME(dtrsm)(side, uplo, transa, diag, &m, &n, &alpha, A->data, &lda, B->data, &ldb) ); } /* Compute a rank-k update of the symmetric matrix C, C = \alpha A A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. */ int fff_blas_dsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, double beta, fff_matrix * C) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(Trans); int n = C->size1; int k = (Trans == CblasNoTrans) ? (int)A->size1 : (int)A->size2; int lda = (int) A->tda; int ldc = (int) C->tda; return( FNAME(dsyrk)(uplo, trans, &n, &k, &alpha, A->data, &lda, &beta, C->data, &ldc) ); } /* Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + \alpha B A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T B + \alpha B^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. 
When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. */ int fff_blas_dsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(Trans); int n = C->size1; int k = (Trans == CblasNoTrans) ? (int)B->size1 : (int)B->size2; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; return( FNAME(dsyr2k)(uplo, trans, &n, &k, &alpha, B->data, &ldb, A->data, &lda, &beta, C->data, &ldc) ); } nipy-0.6.1/lib/fff/fff_blas.h000066400000000000000000000071601470056100100157150ustar00rootroot00000000000000/*! \file fff_blas.h \brief lite wrapper around the Fortran Basic Linear Algeabra Library (BLAS) \author Alexis Roche \date 2008 This library can be linked against the standard (Fortran) blas library, but not against cblas. */ #ifndef FFF_BLAS #define FFF_BLAS #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" #define CBLAS_INDEX_t size_t /* this may vary between platforms */ typedef enum {CblasRowMajor=101, CblasColMajor=102} CBLAS_ORDER_t; typedef enum {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113} CBLAS_TRANSPOSE_t; typedef enum {CblasUpper=121, CblasLower=122} CBLAS_UPLO_t; typedef enum {CblasNonUnit=131, CblasUnit=132} CBLAS_DIAG_t; typedef enum {CblasLeft=141, CblasRight=142} CBLAS_SIDE_t; /* BLAS 1 */ extern double fff_blas_ddot (const fff_vector * x, const fff_vector * y); extern double fff_blas_dnrm2 (const fff_vector * x); extern double fff_blas_dasum (const fff_vector * x); extern CBLAS_INDEX_t fff_blas_idamax (const fff_vector * x); extern int fff_blas_dswap (fff_vector * x, fff_vector * y); extern int fff_blas_dcopy (const fff_vector * x, fff_vector * y); extern int fff_blas_daxpy (double alpha, const fff_vector * x, fff_vector * y); extern int fff_blas_dscal (double alpha, fff_vector * x); extern int fff_blas_drot (fff_vector * x, fff_vector * y, double c, double s); extern int fff_blas_drotg (double a[], double b[], double c[], double s[]); extern int fff_blas_drotmg (double d1[], double d2[], double b1[], double b2, double P[]); extern int fff_blas_drotm (fff_vector * x, fff_vector * y, const double P[]); /* BLAS 2 */ extern int fff_blas_dgemv (CBLAS_TRANSPOSE_t TransA, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y); extern int fff_blas_dtrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x); extern int fff_blas_dtrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x); extern int fff_blas_dsymv (CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y); extern int fff_blas_dger (double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A); extern int fff_blas_dsyr (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, fff_matrix * A); extern int fff_blas_dsyr2 (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A); /* BLAS 3 */ extern int fff_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); extern int fff_blas_dsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); extern int 
fff_blas_dtrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B); extern int fff_blas_dtrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B); extern int fff_blas_dsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, double beta, fff_matrix * C); extern int fff_blas_dsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_gen_stats.c000066400000000000000000000045051470056100100167560ustar00rootroot00000000000000#include "fff_gen_stats.h" #include "fff_lapack.h" #include #include #include #include #include /* Generate a random permutation from [0..n-1]. */ extern void fff_permutation(unsigned int* x, unsigned int n, unsigned long magic) { unsigned int* xi, i, ir, j, tmp, nc; unsigned long int m = magic; /* Initialize x as the identity permutation */ for(i=0, xi=x; i 0 ) { nn --; c = _combinations(kk-1, nn); /* If i is accepted, then store it and do: kk-- */ if ( m < c ) { *bx = i; bx ++; kk --; } else m = m - c; /* Next candidate */ i ++; } return; } /* Squared mahalanobis distance: d2 = x' S^-1 x Beware: x is not const */ extern double fff_mahalanobis(fff_vector* x, fff_matrix* S, fff_matrix* Saux) { double d2; double m = 0.0; /* Cholesky decomposition: S = L L^t, L lower triangular */ fff_lapack_dpotrf(CblasLower, S, Saux); /* Compute S^-1 x */ fff_blas_dtrsv(CblasLower, CblasNoTrans, CblasNonUnit, S, x); /* L^-1 x */ /* Compute x' S^-1 x */ d2 = (double) fff_vector_ssd(x, &m, 1); return d2; } nipy-0.6.1/lib/fff/fff_gen_stats.h000066400000000000000000000026551470056100100167670ustar00rootroot00000000000000/*! \file fff_gen_stats.h \brief General interest statistical routines \author Alexis Roche \date 2004-2008 */ #ifndef FFF_GEN_STATS #define FFF_GEN_STATS #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" /*! \brief Squared Mahalanobis distance \param x input data vector (beware: gets modified) \param S associated variance matrix \param Saux auxiliary matrix, same size as \a S Compute the squared Mahalanobis distance \f$ d^2 = x^t S^{-1} x \f$. The routine uses the Cholesky decomposition: \f$ S = L L^t \f$ where \a L is lower triangular, and then exploits the fact that \f$ d^2 = \| L^{-1}x \|^2 \f$. */ extern double fff_mahalanobis( fff_vector* x, fff_matrix* S, fff_matrix* Saux ); /* \brief Generate a permutation from \a [0..n-1] \param x output list of integers \param n interval range \param seed initial state of the random number generator \a x needs is assumed contiguous, pre-allocated with size \a n. */ extern void fff_permutation(unsigned int* x, unsigned int n, unsigned long magic); /* \brief Generate a random combination of \a k elements in \a [0..n-1]. \a x must be contiguous, pre-allocated with size \a k. By convention, elements are output in ascending order. 
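A minimal usage sketch (hypothetical sizes; \a magic determines which of the C(n,k) possible combinations is generated): unsigned int x[3]; fff_combination(x, 3, 10, 42); after which \a x holds some ascending 3-subset of [0..9].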
*/ extern void fff_combination(unsigned int* x, unsigned int k, unsigned int n, unsigned long magic); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_glm_kalman.c000066400000000000000000000240271470056100100170720ustar00rootroot00000000000000#include "fff_glm_kalman.h" #include "fff_base.h" #include "fff_blas.h" #include #include /* Declaration of static functions */ static void _fff_glm_RKF_iterate_Vb( fff_matrix* Vb, const fff_matrix* Vb0, const fff_matrix* Hspp, double aux1, double aux2, fff_matrix* Maux ); static double _fff_glm_hermit_norm( const fff_matrix* A, const fff_vector* x, fff_vector* vaux ); fff_glm_KF* fff_glm_KF_new( size_t dim ) { fff_glm_KF * thisone; /* Start with allocating the object */ thisone = (fff_glm_KF*) calloc( 1, sizeof(fff_glm_KF) ); /* Checks that the pointer has been allocated */ if ( thisone == NULL) return NULL; /* Allocate KF objects */ thisone->b = fff_vector_new( dim ); thisone->Cby = fff_vector_new( dim ); thisone->Vb = fff_matrix_new( dim, dim ); /* Initialization */ thisone->dim = dim; thisone->t = 0; thisone->ssd = 0.0; thisone->s2 = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; /* Initialize covariance using a scalar matrix */ fff_matrix_set_scalar( thisone->Vb, FFF_GLM_KALMAN_INIT_VAR); return thisone; } void fff_glm_KF_delete( fff_glm_KF* thisone ) { if ( thisone != NULL ) { if ( thisone->b != NULL ) fff_vector_delete(thisone->b); if ( thisone->Cby != NULL ) fff_vector_delete(thisone->Cby); if ( thisone->Vb != NULL ) fff_matrix_delete(thisone->Vb); free( thisone ); } return; } void fff_glm_KF_reset( fff_glm_KF* thisone ) { thisone->t = 0; thisone->ssd = 0.0; thisone->s2 = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; fff_vector_set_all( thisone->b, 0.0 ); fff_matrix_set_scalar( thisone->Vb, FFF_GLM_KALMAN_INIT_VAR ); return; } void fff_glm_KF_iterate( fff_glm_KF* thisone, double y, const fff_vector* x ) { double Ey, Vy, invVy, ino; /* Update time */ thisone->t ++; /* Measurement moments conditional to the effect */ Ey = fff_blas_ddot( x, thisone->b ); fff_blas_dsymv( CblasUpper, 1.0, thisone->Vb, x, 0.0, thisone->Cby ); Vy = fff_blas_ddot( x, thisone->Cby ) + 1.0; invVy = 1/Vy; /* Inovation */ ino = y - Ey; /* Update effect estimate */ fff_blas_daxpy( invVy*ino, thisone->Cby, thisone->b ); /* Update effect variance matrix: Vb = Vb - invVy*Cby*Cby' */ fff_blas_dger( -invVy, thisone->Cby, thisone->Cby, thisone->Vb ); /* Update sum of squares and scale */ thisone->ssd = thisone->ssd + FFF_SQR(ino)*invVy; thisone->s2 = thisone->ssd / (double)thisone->t; return; } fff_glm_RKF* fff_glm_RKF_new( size_t dim ) { fff_glm_RKF* thisone; /* Start with allocating the object */ thisone = (fff_glm_RKF*) calloc( 1, sizeof(fff_glm_RKF) ); /* Checks that the pointer has been allocated */ if ( thisone == NULL) return NULL; /* Allocate RKF objects */ thisone->Kfilt = fff_glm_KF_new( dim ); thisone->db = fff_vector_new( dim ); thisone->Hssd = fff_matrix_new( dim, dim ); thisone->Gspp = fff_vector_new( dim ); thisone->Hspp = fff_matrix_new( dim, dim ); thisone->b = fff_vector_new( dim ); thisone->Vb = fff_matrix_new( dim, dim ); thisone->vaux = fff_vector_new( dim ); thisone->Maux = fff_matrix_new( dim, dim ); /* Initialization */ thisone->dim = dim; thisone->t = 0; thisone->spp = 0.0; thisone->s2 = 0.0; thisone->a = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; return thisone; } void fff_glm_RKF_delete( fff_glm_RKF* thisone ) { if ( thisone != NULL ) { if ( thisone->Kfilt != NULL ) fff_glm_KF_delete( thisone->Kfilt ); if ( thisone->db != NULL ) 
fff_vector_delete(thisone->db); if ( thisone->Hssd != NULL ) fff_matrix_delete(thisone->Hssd); if ( thisone->Gspp != NULL ) fff_vector_delete(thisone->Gspp); if ( thisone->Hspp != NULL ) fff_matrix_delete(thisone->Hspp); if ( thisone->b != NULL ) fff_vector_delete(thisone->b); if ( thisone->Vb != NULL ) fff_matrix_delete(thisone->Vb); if ( thisone->vaux != NULL ) fff_vector_delete(thisone->vaux); if ( thisone->Maux != NULL ) fff_matrix_delete(thisone->Maux); free(thisone); } return; } void fff_glm_RKF_reset( fff_glm_RKF* thisone ) { thisone->t = 0; thisone->spp = 0; thisone->s2 = 0; thisone->a = 0; thisone->dof = 0; thisone->s2_cor = 0; fff_glm_KF_reset( thisone->Kfilt ); fff_vector_set_all( thisone->Gspp, 0.0 ); fff_matrix_set_all( thisone->Hssd, 0.0 ); fff_matrix_set_all( thisone->Hspp, 0.0 ); return; } void fff_glm_RKF_iterate( fff_glm_RKF* thisone, unsigned int nloop, double y, const fff_vector* x, double yy, const fff_vector* xx ) { unsigned int iter; double cor, r, rr, ssd_ref, spp_ref, aux1, aux2; /* Update time */ thisone->t ++; /* Store the current OLS estimate */ fff_vector_memcpy( thisone->vaux, thisone->Kfilt->b ); /* Iterate the standard Kalman filter */ fff_glm_KF_iterate( thisone->Kfilt, y, x ); /* OLS estimate variation */ fff_vector_memcpy( thisone->db, thisone->Kfilt->b ); fff_vector_sub( thisone->db, thisone->vaux ); /* db = b - db */ /* Update SSD hessian: Hssd = Hssd + x*x' */ fff_blas_dger( 1.0, x, x, thisone->Hssd ); /* Dont process any further if we are dealing with the first scan */ if ( thisone->t==1 ) { thisone->s2 = thisone->Kfilt->s2; fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_matrix_memcpy( thisone->Vb, thisone->Kfilt->Vb ); return; } /* Update bias correction factor otherwise */ else cor = (double)thisone->t / (double)(thisone->t - 1); /* Update SPP value */ aux1 = fff_blas_ddot( x, thisone->Kfilt->b ); r = y - aux1; aux1 = fff_blas_ddot( xx, thisone->Kfilt->b ); rr = yy - aux1; aux1 = fff_blas_ddot( thisone->Gspp, thisone->db ); thisone->spp += 2.0*aux1 + _fff_glm_hermit_norm( thisone->Hspp, thisone->db, thisone->vaux ) + r*rr; /* Update SPP gradient. 
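The three calls below implement: Gspp += Hspp*db - .5*(rr*x + r*xx).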
Notice, we currently have: vaux == Hspp*db */ fff_vector_add ( thisone->Gspp, thisone->vaux ); fff_blas_daxpy( -.5*rr, x, thisone->Gspp ); fff_blas_daxpy( -.5*r, xx, thisone->Gspp ); /* Update SPP hessian: Hspp = Hspp + .5*(x*xx'+xx*x') */ fff_blas_dsyr2( CblasUpper, .5, x, xx, thisone->Hspp ); /* Update autocorrelation */ thisone->a = cor*thisone->spp / FFF_ENSURE_POSITIVE( thisone->Kfilt->ssd ); /* Update scale */ thisone->s2 = thisone->Kfilt->s2; /* Refinement loop */ fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_matrix_memcpy( thisone->Vb, thisone->Kfilt->Vb ); iter = 1; while ( iter < nloop ) { aux1 = 1/(1 + FFF_SQR(thisone->a)); aux2 = 2*cor*thisone->a; /* Update covariance */ _fff_glm_RKF_iterate_Vb( thisone->Vb, thisone->Kfilt->Vb, thisone->Hspp, aux1, aux2, thisone->Maux ); /* Update effect estimate */ fff_blas_dsymv( CblasUpper, aux2, thisone->Vb, thisone->Gspp, 0.0, thisone->db ); fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_vector_add( thisone->b, thisone->db ); /* Calculate SSD and SPP at current estimate */ aux1 = fff_blas_ddot( thisone->Gspp, thisone->db ); spp_ref = thisone->spp + 2*aux1 + _fff_glm_hermit_norm( thisone->Hspp, thisone->db, thisone->vaux ); ssd_ref = thisone->Kfilt->ssd + _fff_glm_hermit_norm( thisone->Hssd, thisone->db, thisone->vaux ); /* Update autocorrelation */ thisone->a = cor*spp_ref / FFF_ENSURE_POSITIVE(ssd_ref); /* Update scale */ thisone->s2 = (1-FFF_SQR(thisone->a))*ssd_ref / (double)thisone->t; /* Counter */ iter ++; } return; } void fff_glm_KF_fit( fff_glm_KF* thisone, const fff_vector* y, const fff_matrix* X ) { size_t i, offset_xi = 0; double* yi = y->data; fff_vector xi; /* Init */ fff_glm_KF_reset( thisone ); xi.size = X->size2; xi.stride = 1; /* Tests */ if ( X->size1 != y->size ) return; /* Loop */ for( i=0; i<y->size; i++, yi+=y->stride, offset_xi+=X->tda ) { /* Get the i-th row of the design matrix */ xi.data = X->data + offset_xi; /* Iterate the Kalman filter */ fff_glm_KF_iterate( thisone, *yi, &xi ); } /* DOF */ thisone->dof = (double)(y->size - X->size2); thisone->s2_cor = ((double)y->size/thisone->dof)*thisone->s2; return; } void fff_glm_RKF_fit( fff_glm_RKF* thisone, unsigned int nloop, const fff_vector* y, const fff_matrix* X ) { size_t i, offset_xi = 0; double* yi = y->data; fff_vector xi, xxi; double yyi = 0.0; unsigned int nloop_actual = 1; /* Init */ fff_glm_RKF_reset( thisone ); xi.size = X->size2; xi.stride = 1; xxi.size = X->size2; xxi.stride = 1; xxi.data = NULL; /* Tests */ if ( X->size1 != y->size ) return; /* Loop */ for( i=0; i<y->size; i++, yi+=y->stride, offset_xi+=X->tda ) { /* Get the i-th row of the design matrix */ xi.data = X->data + offset_xi; /* Refinement loop only needed at the last time frame */ if ( i == (y->size-1) ) nloop_actual = nloop; /* Iterate the refined Kalman filter */ fff_glm_RKF_iterate( thisone, nloop_actual, *yi, &xi, yyi, &xxi ); /* Copy current time values */ yyi = *yi; xxi.data = xi.data; } /* DOF */ thisone->dof = (double)(y->size - X->size2); thisone->s2_cor = ((double)y->size/thisone->dof)*thisone->s2; return; } /* Compute: Vb = aux1 * ( Id + aux1*aux2*Vb0*Hspp ) * Vb0 This corresponds to a simplification as the exact update formula would be: Vb = aux1 * pinv( eye(p) - aux1*aux2*Vbd*He ) * Vbd */ static void _fff_glm_RKF_iterate_Vb( fff_matrix* Vb, const fff_matrix* Vb0, const fff_matrix* Hspp, double aux1, double aux2, fff_matrix* Maux ) { fff_blas_dsymm ( CblasLeft, CblasUpper, 1.0, Hspp, Vb0, 0.0, Maux ); /** Maux == Hspp*Vb0 **/ fff_matrix_memcpy( Vb, Vb0 );
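/* Vb = aux1*Vb0 + aux1^2*aux2 * Vb0*Maux = aux1 * ( Id + aux1*aux2*Vb0*Hspp ) * Vb0, using Maux == Hspp*Vb0 */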
fff_blas_dgemm( CblasNoTrans, CblasNoTrans, FFF_SQR(aux1)*aux2, Vb0, Maux, aux1, Vb ); return; } /* Static function to compute the Hermitian norm: x'*A*x for a positive symmetric matrix A. The matrix-vector product A*x is output in the auxiliary vector, vaux. */ static double _fff_glm_hermit_norm( const fff_matrix* A, const fff_vector* x, fff_vector* vaux ) { double norm = 0.0; fff_blas_dsymv( CblasUpper, 1.0, A, x, 0.0, vaux ); norm = fff_blas_ddot( x, vaux ); return FFF_MAX( norm, 0.0 ); } nipy-0.6.1/lib/fff/fff_glm_kalman.h000066400000000000000000000132551470056100100171000ustar00rootroot00000000000000/*! \file fff_glm_kalman.h \brief General linear model fitting using Kalman filters \author Alexis Roche \date 2004-2006 This library implements several Kalman filter variants to fit a signal (represented as a gsl_vector structure) in terms of a general linear model. Kalman filtering works incrementally as opposed to more classical GLM fitting procedures, hence making it possible to produce parameter estimates on each time frame. Two methods are currently available: - the standard Kalman filter: performs an ordinary least-square regression, hence ignoring the temporal autocorrelation of the errors. - the refined Kalman filter: original Kalman extension to estimate both the GLM parameters and the noise autocorrelation based on an autoregressive AR(1) model. Significantly more memory demanding than the standard KF. */ #ifndef FFF_GLM_KALMAN #define FFF_GLM_KALMAN #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" #define FFF_GLM_KALMAN_INIT_VAR 1e7 /*! \struct fff_glm_KF \brief Standard Kalman filter structure. */ typedef struct{ size_t t; /*!< time counter */ size_t dim; /*!< model dimension (i.e. number of linear regressors) */ fff_vector* b; /*!< effect vector */ fff_matrix* Vb; /*!< effect variance matrix before multiplication by scale */ fff_vector* Cby; /*!< covariance between the effect and the data before multiplication by scale */ double ssd; /*!< sum of squared residuals */ double s2; /*!< scale parameter (squared) */ double dof; /*!< degrees of freedom */ double s2_cor; /*!< s2 corrected for degrees of freedom, s2_cor=n*s2/dof */ } fff_glm_KF; /*! \struct fff_glm_RKF \brief Refined Kalman filter structure. */ typedef struct{ size_t t; /*!< time counter */ size_t dim; /*!< model dimension (i.e. number of linear regressors) */ fff_glm_KF* Kfilt; /*!< standard kalman filter */ fff_vector* db; /*!< auxiliary vector for estimate variation */ fff_matrix* Hssd; /*!< SSD hessian (SSD = sum of squared differences) */ double spp; /*!< SSP value (SPP = sum of paired products) */ fff_vector* Gspp; /*!< SSP gradient */ fff_matrix* Hspp; /*!< SSP hessian */ fff_vector* b; /*!< effect vector */ fff_matrix* Vb; /*!< effect variance matrix before multiplication by scale */ double s2; /*!< scale parameter (squared) */ double a; /*!< autocorrelation parameter */ double dof; /*!< degrees of freedom */ double s2_cor; /*!< s2 corrected for degrees of freedom, s2_cor=n*s2/dof */ fff_vector* vaux; /*!< auxiliary vector */ fff_matrix* Maux; /*!< auxiliary matrix */ } fff_glm_RKF; /*! \brief Constructor for the fff_glm_KF structure \param dim model dimension (number of linear regressors) */ extern fff_glm_KF* fff_glm_KF_new( size_t dim ); /*! \brief Destructor for the fff_glm_KF structure \param thisone the fff_glm_KF structure to be deleted */ extern void fff_glm_KF_delete( fff_glm_KF* thisone ); /*! 
\brief Reset function (without destruction) for the fff_glm_KF structure \param thisone the fff_glm_KF structure to be reset */ extern void fff_glm_KF_reset( fff_glm_KF* thisone ); /*! \brief Performs a standard Kalman iteration from a fff_glm_KF structure \param thisone the fff_glm_KF structure to be iterated \param y current signal sample \param x current regressor values */ extern void fff_glm_KF_iterate( fff_glm_KF* thisone, double y, const fff_vector* x ); /*! \brief Constructor for the fff_glm_RKF structure \param dim model dimension (number of linear regressors) */ extern fff_glm_RKF* fff_glm_RKF_new( size_t dim ); /*! \brief Destructor for the fff_glm_RKF structure \param thisone the fff_glm_KF structure to be deleted */ extern void fff_glm_RKF_delete( fff_glm_RKF* thisone ); /*! \brief Reset function (without destruction) for the fff_glm_RKF structure \param thisone the fff_glm_KF structure to be reset */ extern void fff_glm_RKF_reset( fff_glm_RKF* thisone ); /*! \brief Performs a refined Kalman iteration from a fff_glm_RKF structure \param thisone the fff_glm_KF structure to be iterated \param nloop number of refinement iterations \param y current signal sample \param x current regressor values \param yy previous signal sample \param xx previous regressor values */ extern void fff_glm_RKF_iterate( fff_glm_RKF* thisone, unsigned int nloop, double y, const fff_vector* x, double yy, const fff_vector* xx ); /*! \brief Perform an ordinary least square regression using the standard Kalman filter and return the degrees of freedom \param thisone the fff_glm_KF structure to be filled in \param y input data \param X design matrix (column-wise stored covariates) */ extern void fff_glm_KF_fit( fff_glm_KF* thisone, const fff_vector* y, const fff_matrix* X ); /*! \brief Perform a linear regression using the refined Kalman filter, corresponding to a GLM with AR(1) errors. \param thisone the fff_glm_RKF structure to be filled in \param nloop number of refinement iterations \param y input data \param X design matrix (column-wise stored covariates) */ extern void fff_glm_RKF_fit( fff_glm_RKF* thisone, unsigned int nloop, const fff_vector* y, const fff_matrix* X ); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_glm_twolevel.c000066400000000000000000000071241470056100100174670ustar00rootroot00000000000000#include "fff_glm_twolevel.h" #include "fff_base.h" #include "fff_blas.h" #include #include #include /* b, s2 are initialized using the values passed to the function. The function requires the projected pseudo-inverse matrix PpiX to be pre-calculated externally. It is defined by: PpiX = P * (X'X)^-1 X' where: P = Ip - A C' (C A C')^-1 C with A = (X'X)^-1 is the appropriate projector onto the constaint space, Cb=0. P is, in fact, orthogonal for the dot product defined by X'X. PpiX is p x n. The equality PpiX*X=P is not checked. 
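As an illustration, in the unconstrained case (no matrix C), P reduces to the identity and PpiX is simply the pseudo-inverse (X'X)^-1 X', so the M-step of the EM loop below amounts to an ordinary least-squares fit of the current pseudo-data z.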
*/ fff_glm_twolevel_EM* fff_glm_twolevel_EM_new(size_t n, size_t p) { fff_glm_twolevel_EM* thisone; thisone = (fff_glm_twolevel_EM*)malloc(sizeof(fff_glm_twolevel_EM)); if (thisone==NULL) return NULL; thisone->n = n; thisone->p = p; thisone->s2 = FFF_POSINF; thisone->b = fff_vector_new(p); thisone->z = fff_vector_new(n); thisone->vz = fff_vector_new(n); thisone->Qz = fff_vector_new(n); return thisone; } void fff_glm_twolevel_EM_delete(fff_glm_twolevel_EM* thisone) { if (thisone==NULL) return; fff_vector_delete(thisone->b); fff_vector_delete(thisone->z); fff_vector_delete(thisone->vz); fff_vector_delete(thisone->Qz); free(thisone); } void fff_glm_twolevel_EM_init(fff_glm_twolevel_EM* em) { fff_vector_set_all(em->b, 0.0); em->s2 = FFF_POSINF; return; } void fff_glm_twolevel_EM_run(fff_glm_twolevel_EM* em, const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_matrix* PpiX, unsigned int niter) { unsigned int iter = 0; size_t n=X->size1, i; double *yi, *zi, *vyi, *vzi; double w1, w2; double m = 0.0; while (iter < niter) { /*** E step ***/ /* Compute current prediction estimate: z = X*b */ fff_blas_dgemv(CblasNoTrans, 1.0, X, em->b, 0.0, em->z); /* Posterior mean and variance of each "true" effect: vz = 1/(1/vy + 1/s2) z = vz * (y/vy + X*b/s2) */ w2 = FFF_ENSURE_POSITIVE(em->s2); w2 = 1/w2; for(i=0, yi=y->data, zi=em->z->data, vyi=vy->data, vzi=em->vz->data; i<n; i++, yi+=y->stride, zi+=em->z->stride, vyi+=vy->stride, vzi+=em->vz->stride) { w1 = FFF_ENSURE_POSITIVE(*vyi); w1 = 1/w1; *vzi = 1/(w1+w2); *zi = *vzi * (w1*(*yi) + w2*(*zi)); } /*** M step ***/ /* Update effect: b = PpiX * z */ fff_blas_dgemv(CblasNoTrans, 1.0, PpiX, em->z, 0.0, em->b); /* Update variance: s2 = (1/n) [ sum((z-Xb).^2) + sum(vz) ] */ fff_vector_memcpy(em->Qz, em->z); fff_blas_dgemv(CblasNoTrans, 1.0, X, em->b, -1.0, em->Qz); /* Qz= Xb-z = Proj_X(z) - z */ em->s2 = (fff_vector_ssd(em->Qz, &m, 1) + fff_vector_sum(em->vz)) / (long double)n; /*** Increment iteration number ***/ iter ++; } return; } /* Log-likelihood computation. ri = y - Xb -2 LL = n log(2pi) + \sum_i log (s^2 + si^2) + \sum_i ri^2/(s^2 + si^2) We omit the nlog(2pi) term as it is constant. */ double fff_glm_twolevel_log_likelihood(const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_vector* b, double s2, fff_vector* tmp) { double LL = 0.0, w; size_t n=X->size1, i; double *ri, *vyi; /* Compute residuals: tmp = y - X b */ fff_vector_memcpy(tmp, y); fff_blas_dgemv(CblasNoTrans, -1.0, X, b, 1.0, tmp); /* Incremental computation */ for(i=0, ri=tmp->data, vyi=vy->data; i<n; i++, ri+=tmp->stride, vyi+=vy->stride) { w = *vyi + s2; w = FFF_ENSURE_POSITIVE(w); LL += log(w); LL += FFF_SQR(*ri)/w; } /* Finalize computation */ LL *= -0.5; return LL; } nipy-0.6.1/lib/fff/fff_glm_twolevel.h000066400000000000000000000036411470056100100174740ustar00rootroot00000000000000/*! \file fff_glm_twolevel.h \brief General linear model under observation errors (mixed effects) \author Alexis Roche \date 2008 EM estimation of a general linear model in which each observation is measured with a known first-level variance. */ #ifndef FFF_GLM_TWOLEVEL #define FFF_GLM_TWOLEVEL #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" /*! \struct fff_glm_twolevel_EM \brief Structure for the mixed-effect general linear model This structure is intended for multiple regression under mixed effects using the EM algorithm. */ typedef struct{ size_t n; /*! Number of observations */ size_t p; /*! Number of regressors */ fff_vector* b; /*! Effect estimate */ double s2; /*! Variance estimate */ fff_vector* z; /*! Expected true effects */ fff_vector* vz; /*!
Expected variance of the true effects (diagonal matrix) */ fff_vector* Qz; /* Expected prediction error */ unsigned int niter; /* Number of iterations */ } fff_glm_twolevel_EM; extern fff_glm_twolevel_EM* fff_glm_twolevel_EM_new(size_t n, size_t p); extern void fff_glm_twolevel_EM_delete(fff_glm_twolevel_EM* thisone); extern void fff_glm_twolevel_EM_init(fff_glm_twolevel_EM* em); /* \a PpiX is defined by: \f$ PpiX = P (X'X)^{-1} X' \f$, where: \f$ P = I_p - A C (C' A C)^{-1} C' \f$, with \f$ A = (X'X)^{-1} \f$, is the appropriate projector onto the constraint space, \f$ C'b=0 \f$. \a P is, in fact, orthogonal for the dot product defined by \a X'X. Please note that the equality \a PpiX*X=P should hold but is not checked. */ extern void fff_glm_twolevel_EM_run(fff_glm_twolevel_EM* em, const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_matrix* PpiX, unsigned int niter); extern double fff_glm_twolevel_log_likelihood( const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_vector* b, double s2, fff_vector* tmp ); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_lapack.c000066400000000000000000000157241470056100100162270ustar00rootroot00000000000000#include "fff_base.h" #include "fff_lapack.h" #include <errno.h> #define FNAME FFF_FNAME /* dgetrf : LU decomp dpotrf: Cholesky decomp dgesdd: SVD decomp dgeqrf: QR decomp */ #define CHECK_SQUARE(A) \ if ( (A->size1) != (A->size2) ) \ FFF_ERROR("Not a square matrix", EDOM) #define LAPACK_UPLO(Uplo) ( (Uplo)==(CblasUpper) ? "U" : "L" ) extern int FNAME(dgetrf)(int* m, int* n, double* a, int* lda, int* ipiv, int* info); extern int FNAME(dpotrf)(char *uplo, int* n, double* a, int* lda, int* info); extern int FNAME(dgesdd)(char *jobz, int* m, int* n, double* a, int* lda, double* s, double* u, int* ldu, double* vt, int* ldvt, double* work, int* lwork, int* iwork, int* info); extern int FNAME(dgeqrf)(int* m, int* n, double* a, int* lda, double* tau, double* work, int* lwork, int* info); /* Cholesky decomposition */ /*** Aux needs be square with the same size as A ***/ int fff_lapack_dpotrf( CBLAS_UPLO_t Uplo, fff_matrix* A, fff_matrix* Aux ) { char* uplo = LAPACK_UPLO(Uplo); int info; int n = (int)A->size1; /* Assumed squared */ int lda = (int)Aux->tda; CHECK_SQUARE(A); fff_matrix_transpose( Aux, A ); FNAME(dpotrf)(uplo, &n, Aux->data, &lda, &info); fff_matrix_transpose( A, Aux ); return info; } /* LU decomposition */ /*** Aux needs be m x n with m=A->size2 and n=A->size1 ***/ /*** ipiv needs be 1d contiguous in int with size min(m,n) ***/ int fff_lapack_dgetrf( fff_matrix* A, fff_array* ipiv, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int lda = (int)Aux->tda; if ( (ipiv->ndims != 1) || (ipiv->datatype != FFF_INT) || (ipiv->dimX != FFF_MIN(m,n)) || (ipiv->offsetX != 1) ) FFF_ERROR("Invalid array: Ipiv", EDOM); fff_matrix_transpose( Aux, A ); FNAME(dgetrf)(&m, &n, Aux->data, &lda, (int*)ipiv->data, &info); fff_matrix_transpose( A, Aux ); return info; } /* QR decomposition */ /*** Aux needs be m x n with m=A->size2 and n=A->size1 ***/ /*** tau needs be contiguous with size min(m,n) ***/ /*** work needs be contiguous with size >= n ***/ int fff_lapack_dgeqrf( fff_matrix* A, fff_vector* tau, fff_vector* work, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int lda = (int)Aux->tda; int lwork = (int)work->size; if ( (tau->size != FFF_MIN(m,n)) || (tau->stride != 1) ) FFF_ERROR("Invalid vector: tau", EDOM); /* Resets lwork to -1 if the input work vector is too small (in
which case work only needs be of size >= 1) */ if ( lwork < n ) lwork = -1; else if ( work->stride != 1 ) FFF_ERROR("Invalid vector: work", EDOM); fff_matrix_transpose( Aux, A ); FNAME(dgeqrf)(&m, &n, Aux->data, &lda, tau->data, work->data, &lwork, &info); fff_matrix_transpose( A, Aux ); return info; } /* SVD decomposition */ /*** Aux needs be square with size max(m=A->size2, n=A->size1) ***/ /*** s needs be contiguous with size min(m,n) ***/ /*** U needs be m x m ***/ /*** Vt needs be n x n ***/ /*** work needs be contiguous, with size lwork such that dmin = min(M,N) dmax = max(M,N) lwork >= 3*dmin**2 + max(dmax,4*dmin**2+4*dmin) ***/ /*** iwork needs be 1d contiguous in int with size 8*min(m,n) ***/ int fff_lapack_dgesdd( fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int dmin = FFF_MIN(m,n); int dmax = FFF_MAX(m,n); int a1 = FFF_SQR(dmin); int a2 = 4*(a1+dmin); int lwork_min = 3*a1 + FFF_MAX(dmax, a2); int lda = (int)Aux->tda; int ldu = (int)U->tda; int ldvt = (int)Vt->tda; int lwork = work->size; fff_matrix Aux_mm, Aux_nn; CHECK_SQUARE(U); CHECK_SQUARE(Vt); CHECK_SQUARE(Aux); if ( U->size1 != m) FFF_ERROR("Invalid size for U", EDOM); if ( Vt->size1 != n) FFF_ERROR("Invalid size for Vt", EDOM); if ( Aux->size1 != dmax) FFF_ERROR("Invalid size for Aux", EDOM); if ( (s->size != dmin) || (s->stride != 1) ) FFF_ERROR("Invalid vector: s", EDOM); if ( (iwork->ndims != 1) || (iwork->datatype != FFF_INT) || (iwork->dimX != 8*dmin) || (iwork->offsetX != 1 ) ) FFF_ERROR("Invalid array: Iwork", EDOM); /* Resets lwork to -1 if the input work vector is too small (in which case work only needs be of size >= 1) */ if ( lwork < lwork_min ) lwork = -1; else if ( work->stride != 1 ) FFF_ERROR("Invalid vector: work", EDOM); /* Perform the svd on A**t: A**t = U* S* Vt* => A = V* S* Ut* => U = V*, V = U*, s = s* so we just need to swap m <-> n, and U <-> Vt in the input line */ FNAME(dgesdd)("A", &n, &m, A->data, &lda, s->data, Vt->data, &ldvt, U->data, &ldu, work->data, &lwork, (int*)iwork->data, &info); /* At this point, both U and V are in Fortran order, so we need to transpose */ Aux_mm = fff_matrix_block( Aux, 0, m, 0, m ); fff_matrix_transpose(&Aux_mm, U); fff_matrix_memcpy(U, &Aux_mm); Aux_nn = fff_matrix_block( Aux, 0, n, 0, n ); fff_matrix_transpose(&Aux_nn, Vt); fff_matrix_memcpy(Vt, &Aux_nn); return info; } /* simply do the pre-allocations to simplify the use of SVD*/ static int _fff_lapack_SVD(fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt) { int n = A->size1; int m = A->size2; int dmin = FFF_MIN(m,n); int dmax = FFF_MAX(m,n); int lwork = 2* (3*dmin*dmin + FFF_MAX(dmax,4*dmin*dmin + 4*dmin)); int liwork = 8* dmin; fff_vector *work = fff_vector_new(lwork); fff_array *iwork = fff_array_new1d(FFF_INT,liwork); fff_matrix *Aux = fff_matrix_new(dmax,dmax); int info = fff_lapack_dgesdd(A,s,U,Vt,work,iwork,Aux ); fff_vector_delete(work); fff_array_delete(iwork); fff_matrix_delete(Aux); return info; } /* Compute the determinant of a symmetric matrix */ /* caveat : A is modified */ extern double fff_lapack_det_sym(fff_matrix* A) { int i,n = A->size1; fff_matrix* U = fff_matrix_new(n,n); fff_matrix* Vt = fff_matrix_new(n,n); fff_vector* s = fff_vector_new(n); double det; _fff_lapack_SVD(A,s,U,Vt); for (i=0, det=1; isize1; fff_matrix* U = fff_matrix_new(n,n); fff_matrix* Vt = fff_matrix_new(n,n); fff_vector* s = fff_vector_new(n); fff_matrix* iS = fff_matrix_new(n,n); 
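/* Inversion through the SVD: A = U diag(s) V^t, hence inv(A) = V diag(1/s) U^t; iS is meant to hold diag(1/s) */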
fff_matrix* aux = fff_matrix_new(n,n); int info = _fff_lapack_SVD(A,s,U,Vt); fff_matrix_set_all(iS,0); for (i=0 ; isize1=A->size2 and \a A->size2=B->size1, then do \a fff_matrix_transpose(B,A). Then, we may call LAPACK with \a B->data as array input, \a m=B->size2=A->size1 rows, \a n=B->size1=A->size2 columns and \a lda=B->tda leading dimension. The same procedure works to perform convertion in the other way: the "C sizes" are just the swapped "Fortan sizes". */ #ifndef FFF_LAPACK #define FFF_LAPACK #ifdef __cplusplus extern "C" { #endif #include "fff_blas.h" #include "fff_array.h" /*! \brief Cholesky decomposition \param Uplo flag \param A N-by-N matrix \param Aux N-by-N auxiliary matrix The factorization has the form \f$ A = U^t U \f$, if \c Uplo==CblasUpper, or \f$ A = L L^t\f$, if \c Uplo==CblasLower, where \a U is an upper triangular matrix and \a L is lower triangular. On entry, if \c Uplo==CblasUpper, the leading N-by-N upper triangular part of \c A contains the upper triangular part of the matrix \a A, and the strictly lower triangular part of A is not referenced. If \c Uplo==CblasLower, the leading N-by-N lower triangular part of \a A contains the lower triangular part of the matrix \a A, and the strictly upper triangular part of \a A is not referenced. On exit, \a A contains the factor \a U or \a L from the Cholesky factorization. */ extern int fff_lapack_dpotrf( CBLAS_UPLO_t Uplo, fff_matrix* A, fff_matrix* Aux ); /*! \brief LU decomposition \param A M-by-N matrix \param ipiv pivot indices with size min(M,N) \param Aux N-by-M auxiliary matrix On entry, \a A is the M-by-N matrix to be factored. On exit, it contains the factors \a L and \a U from the factorization \a A=PLU, where \a P is a permutation matrix, \a L is a lower triangular matrix with unit diagonal elements (not stored) and \a U is upper triangular. \a ipiv needs be one-dimensional contiguous in \c FFF_INT with size min(M,N) */ extern int fff_lapack_dgetrf( fff_matrix* A, fff_array* ipiv, fff_matrix* Aux ); /*! \brief QR decomposition \param A M-by-N matrix \param tau scalar factors of the elementary reflectors with size min(M,N) \param work auxiliary vector with size >= N \param Aux N-by-M auxiliary matrix Computes matrices \a Q and \a R such that \a A=QR where \a Q is orthonormal and \a R is triangular. On entry, \a A is an M-by-N matrix. On exit, the elements on and above the diagonal of \a A contain the min(M,N)-by-N upper trapezoidal matrix \a R (\a R is upper triangular if \f$ M \geq N\f$); the elements below the diagonal, with the array \a tau, represent the orthogonal matrix \a Q as a product of min(M,N) reflectors. Each \a H(i) has the form \f$ H(i) = I - \tau v v^t \f$ where \f$ \tau \f$ is a real scalar, and \a v is a real vector with v(1:i-1) = 0 and \a v(i)=1; \a v(i+1:M) is stored on exit in \a A(i+1:M,i), and \f$ \tau \f$ in \a tau(i). If \a work is of size 1, then the routine only computes the optimal size for \a work and stores the result in \c work->data[0]. For the actual computation, \a work should be contiguous with size at least N. \a tau needs be contiguous as well. TODO: actually compute \a R using \c dorgqr. */ extern int fff_lapack_dgeqrf( fff_matrix* A, fff_vector* tau, fff_vector* work, fff_matrix* Aux ); /*! 
\brief Singular Value Decomposition \param A M-by-N matrix to decompose (to be overwritten) \param s singular values in descending order, with size min(M,N) \param U M-by-M matrix \param Vt N-by-N matrix \param work auxiliary vector \param iwork auxiliary array of integers \param Aux auxiliary square matrix with size max(M,N) Computes a diagonal matrix \a S and orthonormal matrices \a U and \a Vt such that \f$ A = U S V^t \f$. If \a work is of size 1, then the routine only computes the optimal size for \a work and stores the result in \c work->data[0]. For the actual computation, \a work should be contiguous with size at least: \f$ L_{work} \geq 3 d_{\min}^2 + \max(d_{\max}, 4 (d_{\min}^2 + d_{\min})) \f$ where \f$ d_{\min}=\min(M,N) \f$ and \f$ d_{\max}=\max(M,N) \f$. For good performance, \f$ L_{work} \f$ should generally be larger. \a iwork needs be one-dimensional contiguous in \c FFF_INT with size 8*min(M,N) */ extern int fff_lapack_dgesdd( fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux ); /* \brief Computation of the determinant of symmetric matrices \param A M-by-M matrix (to be overwritten) The determinant is returned as output of the function. The procedure uses the SVD hence it is valid only for symmetric matrices. It is not meant to be optimal at the moment. Caveat : no check is performed -- untested version */ extern double fff_lapack_det_sym(fff_matrix* A); /* \brief Computation of the inverse of symmetric matrices \param iA The resulting output matrix \param A M-by-M matrix to be inverted (to be overwritten) An error code from the underlying SVD routine is returned as output of the function. The procedure uses the SVD hence it is valid only for symmetric matrices. It is not meant to be optimal at the moment.
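For a symmetric \a A with SVD \f$ A = U S V^t \f$, the inverse is recovered as \f$ A^{-1} = V S^{-1} U^t \f$.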
Caveat : no check is performed -- untested version */ extern int fff_lapack_inv_sym(fff_matrix* iA, fff_matrix *A); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_matrix.c000066400000000000000000000162121470056100100162710ustar00rootroot00000000000000#include "fff_base.h" #include "fff_matrix.h" #include #include #include fff_matrix* fff_matrix_new(size_t size1, size_t size2) { fff_matrix* thisone; thisone = (fff_matrix*)calloc(1, sizeof(fff_matrix)); if (thisone == NULL) { FFF_ERROR("Allocation failed", ENOMEM); return NULL; } thisone->data = (double*)calloc(size1*size2, sizeof(double)); if (thisone->data == NULL) FFF_ERROR("Allocation failed", ENOMEM); thisone->size1 = size1; thisone->size2 = size2; thisone->tda = size2; thisone->owner = 1; return thisone; } void fff_matrix_delete(fff_matrix* thisone) { if (thisone->owner) if (thisone->data != NULL) free(thisone->data); free(thisone); return; } /* View */ fff_matrix fff_matrix_view(const double* data, size_t size1, size_t size2, size_t tda) { fff_matrix A; A.size1 = size1; A.size2 = size2; A.tda = tda; A.owner = 0; A.data = (double*)data; return A; } /* Get element */ double fff_matrix_get (const fff_matrix * A, size_t i, size_t j) { return(A->data[i*A->tda + j]); } /* Set element */ void fff_matrix_set (fff_matrix * A, size_t i, size_t j, double a) { A->data[i*A->tda + j] = a; return; } /* Set all elements */ void fff_matrix_set_all (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA = a; } return; } /* Set all diagonal elements to a, others to zero */ void fff_matrix_set_scalar (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) { if (j == i) *bA = a; else *bA = 0.0; } } return; } /* Global scaling */ void fff_matrix_scale (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA *= a; } return; } /* Add constant */ void fff_matrix_add_constant (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA += a; } return; } /* Row view */ fff_vector fff_matrix_row(const fff_matrix* A, size_t i) { fff_vector x; x.size = A->size2; x.stride = 1; x.owner = 0; x.data = A->data + i*A->tda; return x; } /* Column view */ fff_vector fff_matrix_col(const fff_matrix* A, size_t j) { fff_vector x; x.size = A->size1; x.stride = A->tda; x.owner = 0; x.data = A->data + j; return x; } /* Diagonal view */ fff_vector fff_matrix_diag(const fff_matrix* A) { fff_vector x; x.size = FFF_MIN(A->size1, A->size2); x.stride = A->tda + 1; x.owner = 0; x.data = A->data; return x; } /* Block view */ fff_matrix fff_matrix_block(const fff_matrix* A, size_t imin, size_t nrows, size_t jmin, size_t ncols) { fff_matrix Asub; Asub.size1 = nrows; Asub.size2 = ncols; Asub.tda = A->tda; Asub.owner = 0; Asub.data = A->data + jmin + imin*A->tda; return Asub; } /* Row copy */ void fff_matrix_get_row (fff_vector * x, const fff_matrix * A, size_t i) { fff_vector xc = fff_matrix_row(A, i); fff_vector_memcpy(x, &xc); return; } /* Column copy */ void fff_matrix_get_col (fff_vector * x, const fff_matrix * A, size_t j) { fff_vector xc = fff_matrix_col(A, j); fff_vector_memcpy(x, &xc); return; } /* Diag copy */ void fff_matrix_get_diag (fff_vector * x, const fff_matrix * A) { fff_vector xc = 
fff_matrix_diag(A); fff_vector_memcpy(x, &xc); return; } /* Set row */ void fff_matrix_set_row (fff_matrix * A, size_t i, const fff_vector * x) { fff_vector xc = fff_matrix_row(A, i); fff_vector_memcpy(&xc, x); return; } /* Set column */ void fff_matrix_set_col (fff_matrix * A, size_t j, const fff_vector * x) { fff_vector xc = fff_matrix_col(A, j); fff_vector_memcpy(&xc, x); return; } /* Set diag */ void fff_matrix_set_diag (fff_matrix * A, const fff_vector * x) { fff_vector xc = fff_matrix_diag(A); fff_vector_memcpy(&xc, x); return; } /** Methods involving two matrices **/ #define CHECK_SIZE(A,B) \ if ((A->size1) != (B->size1) || (A->size2 != B->size2)) \ FFF_ERROR("Matrices have different sizes", EDOM) #define CHECK_TRANSPOSED_SIZE(A,B) \ if ((A->size1) != (B->size2) || (A->size2 != B->size1)) \ FFF_ERROR("Incompatible matrix sizes", EDOM) /* Copy B in A */ void fff_matrix_memcpy (fff_matrix * A, const fff_matrix * B) { CHECK_SIZE(A, B); /* If both matrices are contiguous in memory, use memcpy, otherwise perform a loop */ if ((A->tda == A->size2) && (B->tda == B->size2)) memcpy((void*)A->data, (void*)B->data, A->size1*A->size2*sizeof(double)); else { size_t i, j, rA, rB; double *bA, *bB; for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA = *bB; } } return; } /* Transpose a matrix: A = B**t. A needs be preallocated This is equivalent to turning the matrix in Fortran convention (column-major order) if initially in C convention (row-major order), and the other way round. */ void fff_matrix_transpose(fff_matrix* A, const fff_matrix* B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_TRANSPOSED_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; bB = B->data + i; for(j=0; jsize2; j++, bA++, bB+=B->tda) *bA = *bB; } return; } /* Add two matrices */ void fff_matrix_add (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA += *bB; } return; } /* Compute: A = A - B */ void fff_matrix_sub (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA -= *bB; } return; } /* Element-wise multiplication */ void fff_matrix_mul_elements (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA *= *bB; } return; } /* Element-wise division */ void fff_matrix_div_elements (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA /= *bB; } return; } long double fff_matrix_sum(const fff_matrix* A) { long double sum = 0.0; fff_vector a; double *buf; size_t i; for(i=0, buf=A->data; isize1; i++, buf+=A->tda) { a = fff_vector_view(buf, A->size2, 1); sum += fff_vector_sum(&a); } return sum; } nipy-0.6.1/lib/fff/fff_matrix.h000066400000000000000000000055661470056100100163100ustar00rootroot00000000000000/*! 
\file fff_matrix.h \brief fff matrix object \author Alexis Roche \date 2003-2008 */ #ifndef FFF_MATRIX #define FFF_MATRIX #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include <stddef.h> /*! \struct fff_matrix \brief The fff matrix structure */ typedef struct { size_t size1; size_t size2; size_t tda; double* data; int owner; } fff_matrix; /*! \brief fff matrix constructor \param size1 number of rows \param size2 number of columns */ extern fff_matrix* fff_matrix_new( size_t size1, size_t size2 ); /*! \brief fff matrix destructor \param thisone instance to delete */ extern void fff_matrix_delete( fff_matrix* thisone ); extern double fff_matrix_get (const fff_matrix * A, size_t i, size_t j); extern void fff_matrix_set (fff_matrix * A, size_t i, size_t j, double a); extern void fff_matrix_set_all (fff_matrix * A, double a); /*! \brief Set all diagonal elements to \a a, others to zero */ extern void fff_matrix_set_scalar (fff_matrix * A, double a); extern void fff_matrix_scale (fff_matrix * A, double a); extern void fff_matrix_add_constant (fff_matrix * A, double a); /** NOT TESTED! **/ extern long double fff_matrix_sum(const fff_matrix* A); /*** Views ***/ extern fff_matrix fff_matrix_view(const double* data, size_t size1, size_t size2, size_t tda); extern fff_vector fff_matrix_row(const fff_matrix* A, size_t i); extern fff_vector fff_matrix_col(const fff_matrix* A, size_t j); extern fff_vector fff_matrix_diag(const fff_matrix* A); extern fff_matrix fff_matrix_block(const fff_matrix* A, size_t imin, size_t nrows, size_t jmin, size_t ncols ); extern void fff_matrix_get_row (fff_vector * x, const fff_matrix * A, size_t i); extern void fff_matrix_get_col (fff_vector * x, const fff_matrix * A, size_t j); extern void fff_matrix_get_diag (fff_vector * x, const fff_matrix * A); extern void fff_matrix_set_row (fff_matrix * A, size_t i, const fff_vector * x); extern void fff_matrix_set_col (fff_matrix * A, size_t j, const fff_vector * x); extern void fff_matrix_set_diag (fff_matrix * A, const fff_vector * x); extern void fff_matrix_memcpy (fff_matrix * A, const fff_matrix * B); /*! \brief transpose a matrix \param B input matrix \param A transposed matrix on exit The matrix \c A needs be pre-allocated consistently with \c B, so that \c A->size1==B->size2 and \c A->size2==B->size1.
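For instance, a minimal sketch (assuming \c B is an existing handle): fff_matrix* A = fff_matrix_new(B->size2, B->size1); fff_matrix_transpose(A, B);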
*/ extern void fff_matrix_transpose( fff_matrix* A, const fff_matrix* B ); extern void fff_matrix_add (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_sub (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_mul_elements (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_div_elements (fff_matrix * A, const fff_matrix * B); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/fff_onesample_stat.c000066400000000000000000001036721470056100100200120ustar00rootroot00000000000000#include "fff_onesample_stat.h" #include "fff_base.h" #include "fff_blas.h" #include #include #include #include #define EL_LDA_TOL 1e-5 #define EL_LDA_ITERMAX 100 #define MIN_RELATIVE_VAR_FFX 1e-4 /* Dummy structure for sorting */ typedef struct{ double x; size_t i; } fff_indexed_data; /* Static structure for empirical MFX stats */ typedef struct{ fff_vector* w; /* weights */ fff_vector* z; /* centers */ fff_matrix* Q; fff_vector* tvar; /* low thresholded variances */ fff_vector* tmp1; fff_vector* tmp2; fff_indexed_data* idx; unsigned int* niter; } fff_onesample_mfx; /* Declaration of static functions */ /** Pure RFX analysis **/ static double _fff_onesample_mean(void* params, const fff_vector* x, double base); static double _fff_onesample_median(void* params, const fff_vector* x, double base); static double _fff_onesample_student(void* params, const fff_vector* x, double base); static double _fff_onesample_laplace(void* params, const fff_vector* x, double base); static double _fff_onesample_tukey(void* params, const fff_vector* x, double base); static double _fff_onesample_sign_stat(void* params, const fff_vector* x, double base); static double _fff_onesample_wilcoxon(void* params, const fff_vector* x, double base); static double _fff_onesample_elr(void* params, const fff_vector* x, double base); static double _fff_onesample_grubb(void* params, const fff_vector* x, double base); static void _fff_absolute_residuals(fff_vector* r, const fff_vector* x, double base); static double _fff_el_solve_lda(fff_vector* c, const fff_vector* w); /** Normal MFX analysis **/ static double _fff_onesample_LR_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_mean_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base); static void _fff_onesample_gmfx_EM(double* m, double* v, const fff_vector* x, const fff_vector* var, unsigned int niter, int constraint); static double _fff_onesample_gmfx_nll(const fff_vector* x, const fff_vector* var, double m, double v); /** Empirical MFX analysis **/ static fff_onesample_mfx* _fff_onesample_mfx_new(unsigned int n, unsigned int* niter, int flagIdx); static void _fff_onesample_mfx_delete(fff_onesample_mfx* thisone); static double _fff_onesample_mean_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_median_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_sign_stat_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_wilcoxon_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_LR_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static void _fff_onesample_mfx_EM(fff_onesample_mfx* Params, const fff_vector* x, const fff_vector* var, int constraint); static void _fff_onesample_mfx_EM_init(fff_onesample_mfx* Params, const fff_vector* x, int flag); static double 
_fff_onesample_mfx_nll(fff_onesample_mfx* Params, const fff_vector* x); /** Low level for qsort **/ static int _fff_abs_comp(const void * x, const void * y); static int _fff_indexed_data_comp(const void * x, const void * y); static void _fff_sort_z(fff_indexed_data* idx, fff_vector* tmp1, fff_vector* tmp2, const fff_vector* z, const fff_vector* w); fff_onesample_stat* fff_onesample_stat_new(unsigned int n, fff_onesample_stat_flag flag, double base) { fff_onesample_stat* thisone = (fff_onesample_stat*)malloc(sizeof(fff_onesample_stat)); if (thisone == NULL) return NULL; /* Fields */ thisone->flag = flag; thisone->base = base; thisone->params = NULL; /* Switch (possibly overwrite the 'par' field)*/ switch (flag) { case FFF_ONESAMPLE_EMPIRICAL_MEAN: thisone->compute_stat = &_fff_onesample_mean; break; case FFF_ONESAMPLE_EMPIRICAL_MEDIAN: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_median; break; case FFF_ONESAMPLE_STUDENT: thisone->compute_stat = &_fff_onesample_student; break; case FFF_ONESAMPLE_LAPLACE: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_laplace; break; case FFF_ONESAMPLE_TUKEY: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_tukey; break; case FFF_ONESAMPLE_SIGN_STAT: thisone->compute_stat = &_fff_onesample_sign_stat; break; case FFF_ONESAMPLE_WILCOXON: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_wilcoxon; break; case FFF_ONESAMPLE_ELR: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_elr; break; case FFF_ONESAMPLE_GRUBB: thisone->compute_stat = &_fff_onesample_grubb; break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } /* End switch */ return thisone; } void fff_onesample_stat_delete(fff_onesample_stat* thisone) { if (thisone == NULL) return; /* Switch */ switch (thisone->flag) { default: break; case FFF_ONESAMPLE_LAPLACE: case FFF_ONESAMPLE_TUKEY: case FFF_ONESAMPLE_WILCOXON: case FFF_ONESAMPLE_ELR: fff_vector_delete((fff_vector*)thisone->params); break; } /* End switch */ free(thisone); } double fff_onesample_stat_eval(fff_onesample_stat* thisone, const fff_vector* x) { double t; t = thisone->compute_stat(thisone->params, x, thisone->base); return t; } /********************************** SAMPLE MEAN *******************************/ static double _fff_onesample_mean(void* params, const fff_vector* x, double base) { double aux; if (params != NULL) return FFF_NAN; aux = fff_vector_sum(x)/(long double)x->size - base; return aux; } /********************************** SAMPLE MEDIAN ****************************/ static double _fff_onesample_median(void* params, const fff_vector* x, double base) { double aux; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp) - base; return aux; } /********************************** STUDENT STATISTIC ****************************/ static double _fff_onesample_student(void* params, const fff_vector* x, double base) { double m, std, aux; int sign; size_t n = x->size; if (params != NULL) return FFF_NAN; std = sqrt(fff_vector_ssd(x, &m, 0)/(long double)x->size); aux = sqrt((double)(n-1))*(m-base); sign = (int) FFF_SIGN(aux); if (sign == 0) /* Sample mean equals baseline, return zero */ return 0.0; aux = aux / std; if (sign > 0) if (aux < FFF_POSINF) return aux; else return FFF_POSINF; else if (aux > FFF_NEGINF) return aux; else return FFF_NEGINF; } /********************************** LAPLACE STATISTIC 
****************************/ static double _fff_onesample_laplace(void* params, const fff_vector* x, double base) { double s, s0, aux; int sign; size_t n = x->size; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp); s = fff_vector_sad(x, aux)/(long double)x->size; s0 = fff_vector_sad(x, base)/(long double)x->size; s0 = FFF_MAX(s0, s); /* Ensure s0 >= s */ aux -= base; sign = FFF_SIGN(aux); if (sign == 0) /* Sample median equals baseline, return zero */ return 0.0; aux = sqrt(2*n*log(s0/s)); if (aux < FFF_POSINF) return (sign * aux); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /********************************** TUKEY STATISTIC ******************************/ static void _fff_absolute_residuals(fff_vector* r, const fff_vector* x, double base) { size_t i, n = x->size; double aux; double *bufX = x->data, *bufR = r->data; for(i=0; istride, bufR+=r->stride) { aux = *bufX - base; *bufR = FFF_ABS(aux); } return; } static double _fff_onesample_tukey(void* params, const fff_vector* x, double base) { double s, s0, aux; int sign; size_t n = x->size; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp); /* Take the median of absolute residuals |x_i-median| */ _fff_absolute_residuals(tmp, x, aux); s = fff_vector_median(tmp); /* Take the median of absolute residuals |x_i-base| */ _fff_absolute_residuals(tmp, x, base); s0 = fff_vector_median(tmp); s0 = FFF_MAX(s0, s); /* Ensure s0 >= s */ aux -= base; /* aux == median(x) - base */ sign = FFF_SIGN(aux); if (sign == 0) /* Sample median equals baseline, return zero */ return 0.0; aux = sqrt(2*n*log(s0/s)); if (aux < FFF_POSINF) return (sign * aux); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /********************************** SIGN STATISTIC ****************************/ static double _fff_onesample_sign_stat(void* params, const fff_vector* x, double base) { size_t i, n = x->size; double rp = 0.0, rm = 0.0, aux; double* buf = x->data; if (params != NULL) return FFF_NAN; for (i=0; istride) { aux = *buf - base; if (aux > 0.0) rp ++; else if (aux < 0.0) rm ++; else { /* in case the sample value is exactly zero */ rp += .5; rm += .5; } } return (rp-rm)/(double)n; } /********************* WILCOXON (SIGNED RANK) STATISTIC *********************/ static int _fff_abs_comp(const void * x, const void * y) { int ans = 1; double xx = *((double*)x); double yy = *((double*)y); xx = FFF_ABS(xx); yy = FFF_ABS(yy); if (yy > xx) { ans = -1; return ans; } if (yy == xx) ans = 0; return ans; } static double _fff_onesample_wilcoxon(void* params, const fff_vector* x, double base) { size_t i, n = x->size; double t = 0.0; double* buf; fff_vector* tmp = (fff_vector*)params; /* Compute the residuals wrt baseline */ fff_vector_memcpy(tmp, x); fff_vector_add_constant(tmp, -base); /* Sort the residuals in terms of their ABSOLUTE values NOTE: tmp needs be contiguous -- and it is, if allocated using fff_onesample_stat_new */ qsort (tmp->data, n, sizeof(double), &_fff_abs_comp); /* Compute the sum of ranks multiplied by corresponding elements' signs */ buf = tmp->data; for(i=1; i<=n; i++, buf++) /* Again buf++ works IFF tmp is contiguous */ t += (double)i * FFF_SIGN(*buf); /* Normalization to have the stat range in [-1,1] */ /* t /= (double)((n*(n+1))/2);*/ /* Normalization */ t /= ((double)(n*n)); return t; } /************************ EMPIRICAL LIKELIHOOD STATISTIC **********************/ static double _fff_onesample_elr(void* params, const fff_vector* x, double 
/************************ EMPIRICAL LIKELIHOOD STATISTIC **********************/

static double _fff_onesample_elr(void* params, const fff_vector* x, double base)
{
  size_t i, n = x->size;
  double lda, aux, nwi;
  int sign;
  fff_vector* tmp = (fff_vector*)params;
  double* buf;

  /* Compute: tmp = x-base */
  fff_vector_memcpy(tmp, x);
  fff_vector_add_constant(tmp, -base);
  aux = fff_vector_sum(tmp)/(long double)tmp->size;
  sign = FFF_SIGN(aux);

  /* If sample mean equals baseline, return zero */
  if (sign == 0)
    return 0.0;

  /* Find the Lagrange multiplier corresponding to the constrained
     empirical likelihood maximization problem */
  lda = _fff_el_solve_lda(tmp, NULL);
  if (lda >= FFF_POSINF) {
    if (sign > 0)
      return FFF_POSINF;
    else
      return FFF_NEGINF;
  }

  /* Compute the log empirical likelihood ratio,
     \log\lambda = \sum_i \log(nw_i) */
  buf = x->data;
  aux = 0.0;
  for(i=0; i<n; i++, buf+=x->stride) {
    nwi = 1/(1 + lda*(*buf-base));
    nwi = FFF_MAX(nwi, 0.0);
    aux += log(nwi);
  }

  /* We output \sqrt{-2\log\lambda} multiplied by the effect's sign */
  aux = -2.0 * aux;
  aux = sqrt(FFF_MAX(aux, 0.0));
  if (aux < FFF_POSINF)
    return (sign*aux);
  else if (sign > 0)
    return FFF_POSINF;
  else
    return FFF_NEGINF;
}

/* Solve the equation: sum(wi*ci/(lda*ci+1)) = 0 where the unknown is lda
   and ci is the constraint, e.g. ci = xi-m. In standard RFX context, wi
   is uniformly constant, while in MFX context it may vary from one
   datapoint to another. By transforming ci into -1./ci, the equation
   becomes: sum(wi/(lda-ci)) = 0 */
static double _fff_el_solve_lda(fff_vector* c, const fff_vector* w)
{
  size_t i, n = c->size;
  unsigned int iter = 0;
  double aux, g, dg, lda, lda0 = FFF_NEGINF, lda1 = FFF_POSINF, ldac, err;
  double *buf, *bufW;

  /* Transform the constraint vector: c = -1./c and find the max and min
     elements of c such that c(i)<0 and c(i)>0, respectively */
  buf = c->data;
  for (i=0; i<n; i++, buf+=c->stride) {
    aux = *buf;
    aux = -1.0/aux;
    *buf = aux; /* Vector values are overwritten */
    if ((aux<0.0) && (aux>lda0))
      lda0 = aux;
    else if ((aux>0.0) && (aux<lda1))
      lda1 = aux;
  }

  /* If either bracket is infinite, the constraint cannot be met within
     the data range: return an infinite multiplier */
  if (!(lda0>FFF_NEGINF) || !(lda1<FFF_POSINF))
    return FFF_POSINF;

  /* Newton iterations safeguarded by dichotomy, starting from zero,
     which always falls within the brackets since lda0 < 0 < lda1 */
  lda = 0.0;
  err = lda1 - lda0;
  while (err > EL_LDA_TOL) {
    iter ++;
    if (iter > EL_LDA_ITERMAX)
      break;

    /* Compute:
         g(lda)  =  \sum_i w_i / (lda - c_i)
         dg(lda) = -\sum_i w_i / (lda - c_i)^2 */
    g = 0.0;
    dg = 0.0;
    buf = c->data;
    if (w == NULL) {
      for (i=0; i<n; i++, buf+=c->stride) {
        aux = 1/(lda-*buf);
        g += aux;
        dg += FFF_SQR(aux);
      }
    }
    else {
      bufW = w->data;
      for (i=0; i<n; i++, buf+=c->stride, bufW+=w->stride) {
        aux = 1/(lda-*buf);
        g += *bufW * aux;
        dg += *bufW * FFF_SQR(aux);
      }
    }

    /* Update brackets */
    if (g > 0.0)
      lda0 = lda;
    else if (g < 0.0)
      lda1 = lda;

    /* Accept the Newton update if it falls within the brackets */
    ldac = lda + (g/dg);
    if ((lda0 < ldac) && (ldac < lda1))
      lda = ldac;
    else
      lda = .5*(lda0+lda1);

    /* Error update */
    err = lda1 - lda0;
  }

  return lda;
}

/******************************* GRUBB STATISTIC *******************************/

static double _fff_onesample_grubb(void* params, const fff_vector* x, double base)
{
  size_t i;
  double t=0.0, mean, std, inv_std, ti;
  double *buf = x->data;

  if (params != NULL)
    return FFF_NAN;
  base = 0; /* unused */

  /* Compute the mean and std deviation */
  std = sqrt(fff_vector_ssd(x, &mean, 0)/(long double)x->size);
  inv_std = 1/std;
  if (inv_std >= FFF_POSINF) /* constant sample: statistic undefined */
    return 0.0;

  /* Compute the max of Studentized datapoints */
  for (i=0; i<x->size; i++, buf+=x->stride) {
    ti = (*buf-mean) * inv_std;
    ti = FFF_ABS(ti);
    if (ti > t)
      t = ti;
  }

  return t;
}

/*****************************************************************************************/
/*  Mixed-effect statistic structure                                                     */
/*****************************************************************************************/

fff_onesample_stat_mfx* fff_onesample_stat_mfx_new(unsigned int n,
                                                   fff_onesample_stat_flag flag,
                                                   double base)
{
  fff_onesample_stat_mfx* thisone =
(fff_onesample_stat_mfx*)malloc(sizeof(fff_onesample_stat_mfx)); if (thisone == NULL) return NULL; /* Fields */ thisone->flag = flag; thisone->base = base; thisone->empirical = 1; thisone->niter = 0; thisone->constraint = 0; thisone->params = NULL; /* Switch (possibly overwrite the 'par' field)*/ switch (flag) { case FFF_ONESAMPLE_STUDENT_MFX: thisone->empirical = 0; thisone->compute_stat = &_fff_onesample_LR_gmfx; thisone->params = (void*)(&(thisone->niter)); break; case FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX: thisone->empirical = 0; thisone->compute_stat = &_fff_onesample_mean_gmfx; thisone->params = (void*)(&(thisone->niter)); break; case FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX: thisone->compute_stat = &_fff_onesample_mean_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; case FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX: thisone->compute_stat = &_fff_onesample_median_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 1); break; case FFF_ONESAMPLE_SIGN_STAT_MFX: thisone->compute_stat = &_fff_onesample_sign_stat_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; case FFF_ONESAMPLE_WILCOXON_MFX: thisone->compute_stat = &_fff_onesample_wilcoxon_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 1); break; case FFF_ONESAMPLE_ELR_MFX: thisone->compute_stat = &_fff_onesample_LR_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } /* End switch */ return thisone; } void fff_onesample_stat_mfx_delete(fff_onesample_stat_mfx* thisone) { if (thisone == NULL) return; if (thisone->empirical) _fff_onesample_mfx_delete((fff_onesample_mfx*)thisone->params); free(thisone); return; } static fff_onesample_mfx* _fff_onesample_mfx_new(unsigned int n, unsigned int* niter, int flagIdx) { fff_onesample_mfx* thisone; thisone = (fff_onesample_mfx*)malloc(sizeof(fff_onesample_mfx)); thisone->w = fff_vector_new(n); thisone->z = fff_vector_new(n); thisone->Q = fff_matrix_new(n, n); thisone->tvar = fff_vector_new(n); thisone->tmp1 = fff_vector_new(n); thisone->tmp2 = fff_vector_new(n); thisone->idx = NULL; thisone->niter = niter; if (flagIdx == 1) thisone->idx = (fff_indexed_data*)calloc(n, sizeof(fff_indexed_data)); return thisone; } static void _fff_onesample_mfx_delete(fff_onesample_mfx* thisone) { fff_vector_delete(thisone->w); fff_vector_delete(thisone->z); fff_matrix_delete(thisone->Q); fff_vector_delete(thisone->tvar); fff_vector_delete(thisone->tmp1); fff_vector_delete(thisone->tmp2); if (thisone->idx != NULL) free(thisone->idx); free(thisone); return; } double fff_onesample_stat_mfx_eval(fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx) { double t; t = thisone->compute_stat(thisone->params, x, vx, thisone->base); return t; } /*****************************************************************************************/ /* Standard MFX (normal population model) */ /*****************************************************************************************/ static double _fff_onesample_mean_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base) { unsigned int niter = *((unsigned int*)params); double mu = 0.0, v = 0.0; _fff_onesample_gmfx_EM(&mu, &v, x, var, niter, 0); return (mu-base); } static double _fff_onesample_LR_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base) { int sign; double t, mu = 0.0, v = 0.0, v0 = 0.0, nll, nll0; unsigned int niter = 
*((unsigned int*)params);

  /* Estimate maximum likelihood group mean and group variance */
  _fff_onesample_gmfx_EM(&mu, &v, x, var, niter, 0);

  /* MFX mean estimate equals baseline, return zero */
  t = mu - base;
  sign = FFF_SIGN(t);
  if (sign == 0)
    return 0.0;

  /* Estimate maximum likelihood group variance under zero group mean
     assumption */
  _fff_onesample_gmfx_EM(&base, &v0, x, var, niter, 1);

  /* Negated log-likelihoods */
  nll = _fff_onesample_gmfx_nll(x, var, mu, v);
  nll0 = _fff_onesample_gmfx_nll(x, var, base, v0);

  /* If both nll and nll0 are globally minimized, we always have:
     nll0 >= nll; however, EM convergence issues may cause nll>nll0, in
     which case we return 0.0 */
  t = -2.0 * (nll - nll0);
  t = FFF_MAX(t, 0.0);
  if (t < FFF_POSINF)
    return sign * sqrt(t);
  /* To get perhaps a more "Student-like" statistic:
     t = sign * sqrt((n-1)*(exp(t/nn) - 1.0)); */
  else if (sign > 0)
    return FFF_POSINF;
  else
    return FFF_NEGINF;
}

/* EM algorithm to estimate the mean and variance parameters. */
static void _fff_onesample_gmfx_EM(double* m, double* v,
                                   const fff_vector* x, const fff_vector* var,
                                   unsigned int niter, int constraint)
{
  size_t n = x->size, i;
  unsigned int iter = 0;
  double nn=(double)n, m1, v1, m0, v0, mi_ap, vi_ap, aux;
  double *bufx, *bufvar;

  /* Initialization: pure RFX solution (FFX variances set to zero) */
  if ( ! constraint )
    /** m1 = gsl_stats_mean(x->data, x->stride, n);
        v1 = gsl_stats_variance_with_fixed_mean(x->data, x->stride, n, m1); **/
    v1 = fff_vector_ssd(x, &m1, 0)/(long double)x->size;
  else {
    m1 = 0.0;
    v1 = fff_vector_ssd(x, &m1, 1)/(long double)x->size;
  }

  /* Refine result using an EM loop */
  while (iter < niter) {

    /* Previous estimates */
    m0 = m1;
    v0 = v1;

    /* Loop: aggregated E- and M-steps */
    bufx = x->data;
    bufvar = var->data;
    if ( ! constraint )
      m1 = 0.0;
    v1 = 0.0;
    for (i=0; i<n; i++, bufx+=x->stride, bufvar+=var->stride) {

      /* Posterior mean and variance of the true effect value */
      aux = 1.0 / (*bufvar + v0);
      mi_ap = v0 * (*bufx) + (*bufvar) * m0;
      mi_ap *= aux;
      vi_ap = aux * (*bufvar) * v0;

      /* Update */
      if ( ! constraint )
        m1 += mi_ap;
      v1 += vi_ap + FFF_SQR(mi_ap);
    }

    /* Normalization */
    if ( ! constraint )
      m1 /= nn;
    v1 /= nn;
    v1 -= FFF_SQR(m1);

    /* Iteration number */
    iter ++;
  }

  /* Save estimates */
  *m = m1;
  *v = v1;

  return;
}
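/* Illustrative usage sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined): evaluating the MFX Student statistic
   from first-level effect estimates x and their variances vx. The
   function name and EM iteration count are arbitrary. */
#ifdef FFF_EXAMPLES
static double example_student_mfx(const fff_vector* x, const fff_vector* vx)
{
  fff_onesample_stat_mfx* stat =
    fff_onesample_stat_mfx_new((unsigned int)x->size, FFF_ONESAMPLE_STUDENT_MFX, 0.0);
  double t;

  stat->niter = 10; /* arbitrary choice of EM iterations */
  t = fff_onesample_stat_mfx_eval(stat, x, vx);
  fff_onesample_stat_mfx_delete(stat);

  return t;
}
#endif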
/* Negated log-likelihood for the MFX model */
static double _fff_onesample_gmfx_nll(const fff_vector* x, const fff_vector* var,
                                      double m, double v)
{
  size_t n = x->size, i;
  double s, aux, ll = 0.0;
  double *bufx = x->data, *bufvar = var->data;

  for (i=0; i<n; i++, bufx+=x->stride, bufvar+=var->stride) {
    s = *bufvar + v;
    aux = *bufx - m;
    ll += log(s);
    ll += FFF_SQR(aux) / s;
  }
  ll *= .5;

  return ll;
}

/*****************************************************************************************/
/*  Empirical MFX                                                                        */
/*****************************************************************************************/

static double _fff_onesample_mean_mfx(void* params, const fff_vector* x,
                                      const fff_vector* var, double base)
{
  double m;
  fff_onesample_mfx* Params = (fff_onesample_mfx*)params;
  long double aux, sumw;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, 0);

  /* Compute the mean of the estimated distribution */
  /** m = gsl_stats_wmean(Params->w->data, Params->w->stride,
                          Params->z->data, Params->z->stride,
                          Params->z->size) - base; **/
  aux = fff_vector_wsum(Params->z, Params->w, &sumw);
  m = aux/sumw - base;

  return m;
}

static double _fff_onesample_median_mfx(void* params, const fff_vector* x,
                                        const fff_vector* var, double base)
{
  double m;
  fff_onesample_mfx* Params = (fff_onesample_mfx*)params;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, 0);

  /* Compute the median of the estimated distribution */
  /** m = fff_weighted_median(Params->idx, Params->w, Params->z) - base; **/
  _fff_sort_z(Params->idx, Params->tmp1, Params->tmp2, Params->z, Params->w);
  m = fff_vector_wmedian_from_sorted_data(Params->tmp1, Params->tmp2) - base;

  return m;
}

static double _fff_onesample_sign_stat_mfx(void* params, const fff_vector* x,
                                           const fff_vector* var, double base)
{
  fff_onesample_mfx* Params = (fff_onesample_mfx*)params;
  double *buf, *bufw;
  double aux, rp = 0.0, rm = 0.0;
  size_t i, n = x->size;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, 0);

  /* Compute the sign statistic of the fitted distribution */
  buf = Params->z->data;
  bufw = Params->w->data;
  for (i=0; i<n; i++, buf+=Params->z->stride, bufw+=Params->w->stride) {
    aux = *buf - base;
    if (aux > 0.0)
      rp += *bufw;
    else if (aux < 0.0)
      rm += *bufw;
    else { /* in case the center equals the baseline exactly */
      aux = .5 * *bufw;
      rp += aux;
      rm += aux;
    }
  }

  return (rp-rm);
}

static double _fff_onesample_wilcoxon_mfx(void* params, const fff_vector* x,
                                          const fff_vector* var, double base)
{
  double t = 0.0;
  fff_onesample_mfx* Params = (fff_onesample_mfx*)params;
  size_t i, n = x->size;
  double *buf1, *buf2;
  double zi, wi, Ri;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, 0);

  /* Compute the vector of absolute residuals wrt the baseline */
  buf1 = Params->tmp1->data;
  buf2 = Params->z->data;
  for(i=0; i<n; i++, buf1+=Params->tmp1->stride, buf2+=Params->z->stride) {
    zi = *buf2 - base;
    *buf1 = FFF_ABS(zi);
  }

  /* Sort the absolute residuals and get the permutation of indices */
  /** gsl_sort_vector_index(Params->idx, Params->tmp1); **/
  _fff_sort_z(Params->idx, Params->tmp1, Params->tmp2, Params->z, Params->w);

  /* Compute the sum of ranks */
  /** Ri = 0.0;
      for(i=0; i<n; i++) {
        j = Params->idx->data[i];
        zi = Params->z->data[j*Params->z->stride];
        wi = Params->w->data[j*Params->w->stride];
        Ri += wi;
        if (zi > base) t += wi * Ri;
        else if (zi < base) t -= wi * Ri;
      } **/
  Ri = 0.0;
  for(i=1, buf1=Params->tmp1->data, buf2=Params->tmp2->data; i<=n;
      i++, buf1++, buf2++) { /* tmp1/tmp2 are contiguous */
    zi = *buf1;
    wi = *buf2;
    Ri += wi;
    if (zi > base)
      t += wi * Ri;
    else if (zi < base)
      t -= wi * Ri;
  }

  return t;
}
static double _fff_onesample_LR_mfx(void* params, const fff_vector* x,
                                    const fff_vector* var, double base)
{
  double t, mu, nll, nll0;
  int sign;
  fff_onesample_mfx* Params = (fff_onesample_mfx*)params;
  long double aux, sumw;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, 0);
  nll = _fff_onesample_mfx_nll(Params, x);

  /* Estimate the population mean */
  /** mu = gsl_stats_wmean(Params->w->data, Params->w->stride,
                           Params->z->data, Params->z->stride,
                           Params->z->size); **/
  aux = fff_vector_wsum(Params->z, Params->w, &sumw);
  mu = aux/sumw;

  /* MFX mean estimate equals baseline, return zero */
  t = mu - base;
  sign = FFF_SIGN(t);
  if (sign == 0)
    return 0.0;

  /* Estimate the population distribution under zero mean constraint */
  _fff_onesample_mfx_EM(Params, x, var, 1);
  nll0 = _fff_onesample_mfx_nll(Params, x);

  /* Compute the one-sided likelihood ratio statistic */
  t = -2.0 * (nll - nll0);
  t = FFF_MAX(t, 0.0);
  if (t < FFF_POSINF)
    return sign * sqrt(t);
  else if (sign > 0)
    return FFF_POSINF;
  else
    return FFF_NEGINF;
}

/* EM algorithm to estimate the population distribution as a linear
   combination of Diracs centered at the datapoints. */
static void _fff_onesample_mfx_EM(fff_onesample_mfx* Params,
                                  const fff_vector* x, const fff_vector* var,
                                  int constraint)
{
  fff_vector *w = Params->w, *z = Params->z;
  fff_vector *tvar = Params->tvar, *tmp1 = Params->tmp1, *tmp2 = Params->tmp2;
  fff_matrix *Q = Params->Q;
  unsigned int niter = *(Params->niter);
  size_t n = x->size, i, k;
  unsigned int iter = 0;
  double m, lda, aux;
  double *buf, *buf2;
  fff_vector Qk;

  /* Pre-process: low threshold the variances to avoid numerical
     instabilities */
  aux = fff_vector_ssd(x, &m, 0)/(long double)(FFF_MAX(n,2)-1);
  aux *= MIN_RELATIVE_VAR_FFX;
  fff_vector_memcpy(tvar, var);
  buf = tvar->data;
  for(i=0; i<n; i++, buf+=tvar->stride) {
    if (*buf < aux)
      *buf = aux;
  }

  /* Initial estimate: uniform weights, class centers at datapoints */
  fff_vector_set_all(w, 1/(double)n);
  fff_vector_memcpy(z, x);

  /* Refine result using an EM loop */
  while (iter < niter) {

    /* Compute the posterior probability matrix Qik:
       probability that subject i belongs to class k */
    _fff_onesample_mfx_EM_init(Params, x, 0);

    /* Update weights: wk = sum_i Qik / n */
    buf = w->data;
    for(k=0; k<n; k++, buf+=w->stride) {
      Qk = fff_matrix_col(Q, k);
      *buf = fff_vector_sum(&Qk)/(long double)n;
    }

    /* Reweight if restricted maximum likelihood: use the same Newton
       algorithm as in standard empirical likelihood */
    if ( constraint ) {
      fff_vector_memcpy(tmp1, z);
      lda = _fff_el_solve_lda(tmp1, w);
      if(lda < FFF_POSINF) {
        buf = z->data;
        buf2 = w->data;
        for(i=0; i<n; i++, buf+=z->stride, buf2+=w->stride)
          *buf2 *= 1/(1 + lda*(*buf));
      }
    }

    /* Update centers: zk = sum_i Rik xi with Rik = Qik/si^2 */
    buf = z->data;
    buf2 = tmp2->data;
    for(k=0; k<n; k++, buf+=z->stride, buf2+=tmp2->stride) {
      /* Store the unconstrained ML update in z */
      Qk = fff_matrix_col(Q, k);
      fff_vector_memcpy(tmp1, &Qk);
      fff_vector_div(tmp1, tvar); /* Store Rik in tmp1 */
      aux = (double)fff_vector_sum(tmp1); /* aux == Rk = sum_i Rik */
      aux = FFF_ENSURE_POSITIVE(aux);
      *buf = fff_blas_ddot(tmp1, x); /* z[k] = sum_i Rik xi */
      *buf /= aux;
      /* Store Rk = sum_i Rik in tmp2 */
      *buf2 = aux;
    }

    /* Shift to zero if restricted maximum likelihood */
    if ( constraint ) {
      fff_vector_memcpy(tmp1, w);
      fff_vector_div(tmp1, tmp2);   /* tmp1_k == wk/Rk */
      aux = fff_blas_ddot(w, tmp1); /* aux == sum_k [ wk^2 / Rk ] */
      lda = fff_blas_ddot(w, z);    /* lda = sum_k wk zk */
      aux = FFF_ENSURE_POSITIVE(aux);
      lda /= aux;                   /* lda = sum_k wk zk / sum_k [ wk^2 / Rk ] */
      fff_blas_daxpy(-lda, tmp1, z); /* zk = zk - lda * wk/Rk */
    }

    /* Iteration number */
    iter ++;
  }

  return;
}
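/* Illustrative usage sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined): recovering the nonparametric
   population distribution fitted by the EM loop above, as weights w
   over centers z, via the public fff_onesample_stat_mfx_pdf_fit entry
   point declared in fff_onesample_stat.h. The function name and EM
   iteration count are arbitrary. */
#ifdef FFF_EXAMPLES
static void example_mfx_pdf_fit(const fff_vector* x, const fff_vector* vx)
{
  size_t n = x->size;
  fff_vector* w = fff_vector_new(n);
  fff_vector* z = fff_vector_new(n);
  fff_onesample_stat_mfx* stat =
    fff_onesample_stat_mfx_new((unsigned int)n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, 0.0);

  stat->niter = 10; /* arbitrary choice of EM iterations */
  fff_onesample_stat_mfx_pdf_fit(w, z, stat, x, vx);
  /* ... use the fitted (w, z) pairs here ... */

  fff_onesample_stat_mfx_delete(stat);
  fff_vector_delete(w);
  fff_vector_delete(z);
}
#endif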
/* If flag == 0, assemble the posterior probability matrix Q
     Qik : posterior probability that subject i belongs to class k.
     Qik = ci wk g(xi-zk,si)
     ci determined by sum_k Qik = 1
   Otherwise, assemble the likelihood matrix G
     Gik = g(xi-zk,si) */
static void _fff_onesample_mfx_EM_init(fff_onesample_mfx* Params,
                                       const fff_vector* x, int flag)
{
  fff_matrix* Q = Params->Q;
  const fff_vector *w = Params->w, *z = Params->z, *var = Params->tvar;
  size_t i, k, n = x->size, ii;
  double xi, si;
  double *bufQ, *bufxi, *bufvi, *bufwk, *bufzk;
  double sum = 0.0, aux;

  /* Loop over subjects */
  bufxi = x->data;
  bufvi = var->data;
  for(i=0; i<n; i++, bufxi+=x->stride, bufvi+=var->stride) {
    xi = *bufxi;
    si = sqrt(*bufvi);
    ii = i*Q->tda; /* First element of the i-th line of Q */

    /* Loop over classes: compute Qik = wk * g(xi-zk,si), for each k */
    bufwk = w->data;
    bufzk = z->data;
    bufQ = Q->data + ii;
    sum = 0.0;
    for(k=0; k<n; k++, bufQ++, bufwk+=w->stride, bufzk+=z->stride) {
      /** aux = gsl_ran_gaussian_pdf(xi-*bufzk, si); **/
      aux = (xi-*bufzk)/si;
      aux = exp(-.5 * FFF_SQR(aux)); /* No need to divide by sqrt(2pi)si
                                        as it is constant */
      *bufQ = FFF_ENSURE_POSITIVE(aux); /* Refrain posterior probabilities
                                           from vanishing */
      if (flag == 0) {
        *bufQ *= *bufwk;
        sum += *bufQ;
      }
    }

    /* Loop over classes: normalize Qik */
    if (flag == 0) {
      bufQ = Q->data + ii;
      for(k=0; k<n; k++, bufQ++)
        *bufQ /= sum;
    }
  }

  return;
}

/* Negated log-likelihood of the empirical MFX model */
static double _fff_onesample_mfx_nll(fff_onesample_mfx* Params, const fff_vector* x)
{
  const fff_vector *w = Params->w;
  fff_vector *Gw = Params->tmp1;
  fff_matrix* G = Params->Q;
  size_t i, n = w->size;
  double aux, nll = 0.0;
  double *buf;

  /* Compute G */
  _fff_onesample_mfx_EM_init(Params, x, 1);

  /* Compute Gw */
  fff_blas_dgemv(CblasNoTrans, 1.0, G, w, 0.0, Gw);

  /* Compute the sum of logarithms of Gw */
  buf = Gw->data;
  for (i=0; i<n; i++, buf+=Gw->stride) {
    aux = *buf;
    aux = FFF_ENSURE_POSITIVE(aux);
    nll -= log(aux);
  }

  return nll;
}

extern void fff_onesample_stat_mfx_pdf_fit(fff_vector* w, fff_vector* z,
                                           fff_onesample_stat_mfx* thisone,
                                           const fff_vector* x, const fff_vector* var)
{
  fff_onesample_mfx* Params = (fff_onesample_mfx*)thisone->params;
  unsigned int constraint = thisone->constraint;

  /* Check appropriate flag */
  if (!thisone->empirical)
    return;

  /* Estimate the population distribution using EM */
  _fff_onesample_mfx_EM(Params, x, var, constraint);

  /* Copy result in output vectors */
  fff_vector_memcpy(w, Params->w);
  fff_vector_memcpy(z, Params->z);

  return;
}

extern void fff_onesample_stat_gmfx_pdf_fit(double *mu, double *v,
                                            fff_onesample_stat_mfx* thisone,
                                            const fff_vector* x, const fff_vector* var)
{
  unsigned int niter = thisone->niter;
  unsigned int constraint = thisone->constraint;

  /* Estimate the population gaussian parameters using EM */
  _fff_onesample_gmfx_EM(mu, v, x, var, niter, constraint);
}

/** Comparison function for qsort **/
static int _fff_indexed_data_comp(const void * x, const void * y)
{
  int ans = 1;
  fff_indexed_data xx = *((fff_indexed_data*)x);
  fff_indexed_data yy = *((fff_indexed_data*)y);

  if (yy.x > xx.x) {
    ans = -1;
    return ans;
  }
  if (yy.x == xx.x)
    ans = 0;

  return ans;
}
/** Sort z array and re-order w accordingly **/
static void _fff_sort_z(fff_indexed_data* idx, fff_vector* tmp1, fff_vector* tmp2,
                        const fff_vector* z, const fff_vector* w)
{
  size_t n = z->size, i, is;
  double *buf1, *buf2;
  fff_indexed_data* buf_idx;

  /* Copy z into the auxiliary qsort structure idx */
  for(i=0, buf1=z->data, buf_idx=idx; i<n; i++, buf_idx++, buf1+=z->stride) {
    (*buf_idx).x = *buf1;
    (*buf_idx).i = i;
  }

  /* Effectively sort */
  qsort (idx, n, sizeof(fff_indexed_data), &_fff_indexed_data_comp);

  /* Copy the sorted z into tmp1, and the accordingly sorted w into tmp2 */
  for(i=0, buf1=tmp1->data, buf2=tmp2->data, buf_idx=idx; i<n;
      i++, buf_idx++, buf1+=tmp1->stride, buf2+=tmp2->stride) {
    is = (*buf_idx).i;
    *buf1 = (*buf_idx).x;
    *buf2 = w->data[ is*w->stride ];
  }

  return;
}

/* Sign permutations */
void fff_onesample_permute_signs(fff_vector* xx, const fff_vector* x, double magic)
{
  size_t n = x->size, i;
  double *bufx=x->data, *bufxx=xx->data;
  double m = magic, aux;

  for (i=0; i<n; i++, bufx+=x->stride, bufxx+=xx->stride) {
    aux = m/2;
    m = FFF_FLOOR(aux);
    aux -= m;
    if (aux > 0)
      *bufxx = -*bufx;
    else
      *bufxx = *bufx;
  }

  return;
}
nipy-0.6.1/lib/fff/fff_onesample_stat.h000066400000000000000000000151441470056100100200130ustar00rootroot00000000000000/*!
  \file fff_onesample_stat.h
  \brief One-sample test statistics
  \author Alexis Roche
  \date 2004-2008
*/

#ifndef FFF_ONESAMPLE_STAT
#define FFF_ONESAMPLE_STAT

#ifdef __cplusplus
extern "C" {
#endif

#include "fff_vector.h"

/*!
  \typedef fff_onesample_stat_flag
  \brief Decision statistic for one-sample tests

  \c FFF_ONESAMPLE_MEAN is the sample mean. In permutation testing
  context, it is equivalent to \c FFF_ONESAMPLE_STUDENT (see below).

  \c FFF_ONESAMPLE_MEDIAN is the sample median.

  \c FFF_ONESAMPLE_STUDENT is the one-sample Student statistic defined
  as \f$ t = \frac{\hat{m}-m}{\hat{\sigma}/\sqrt{n}} \f$, where \a n is
  the sample size, \f$\hat{m}\f$ is the sample mean, and
  \f$\hat{\sigma}\f$ is the sample standard deviation normalized by
  \a n-1.

  \c FFF_ONESAMPLE_LAPLACE is a robust version of Student's \a t based
  on the Laplace likelihood ratio. The statistic is defined by:
  \f$ t = {\rm sign}(med-m) \sqrt{2n\log(\frac{s_0}{s})}\f$, where \a n
  is the sample size, \f$med\f$ is the sample median, and \f$s, s_0\f$
  are the mean absolute deviations wrt the median and the baseline,
  respectively. Owing to Wilks's theorem, \a t is an approximate
  Z-statistic under the null assumption \a m=base.

  \c FFF_ONESAMPLE_TUKEY is similar to Laplace's \a t except the scale
  estimates are computed using the median of absolute deviations (MAD)
  rather than the average absolute deviation. This provides an even
  more robust statistic, which we term Tukey's \a t as Tukey appears to
  be the first author who proposed MAD as a scale estimator.

  \c FFF_ONESAMPLE_SIGN_STAT is the simple sign statistic,
  \f$ t = (n_+ - n_-)/n \f$ where \f$ n_+ \f$ (resp. \f$ n_- \f$) is
  the number of sample values greater than (resp. lower than) the
  baseline, and \a n is the total sample size.

  \c FFF_ONESAMPLE_SIGNED_RANK is Wilcoxon's signed rank statistic,
  \f$ t = \frac{2}{n(n+1)} \sum_i {\rm rank}(|x_i-m|) {\rm sign}(x_i-m) \f$,
  where rank values range from 1 to \a n, the sample size. Using this
  definition, \a t ranges from -1 to 1.

  \c FFF_ONESAMPLE_ELR implements the empirical likelihood ratio for a
  univariate mean (see Owen, 2001). The one-tailed statistic is defined
  as: \f$ t = {\rm sign}(\hat{\mu}-m) \sqrt{-2\log\lambda} \f$, where
  \a n is the sample size, \f$\hat{\mu}\f$ is the empirical mean, and
  \f$\lambda\f$ is the empirical likelihood ratio. The latter is given
  by \f$ \lambda = \prod_{i=1}^n nw_i\f$ where \f$ w_i \f$ are
  nonnegative weights assessing the "probability" of each datapoint
  under the null assumption that the population mean equals \a m.

  \c FFF_ONESAMPLE_GRUBB is the Grubb's statistic for normality testing.
It is defined as \f$ t = \max_i \frac{|x_i-\hat{m}|}{\hat{\sigma}} \f$ where \f$\hat{m}\f$ is the sample mean, and \f$\hat{\sigma}\f$ is the sample standard deviation. */ typedef enum { FFF_ONESAMPLE_EMPIRICAL_MEAN = 0, FFF_ONESAMPLE_EMPIRICAL_MEDIAN = 1, FFF_ONESAMPLE_STUDENT = 2, FFF_ONESAMPLE_LAPLACE = 3, FFF_ONESAMPLE_TUKEY = 4, FFF_ONESAMPLE_SIGN_STAT = 5, FFF_ONESAMPLE_WILCOXON = 6, FFF_ONESAMPLE_ELR = 7, FFF_ONESAMPLE_GRUBB = 8, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX = 10, FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX = 11, FFF_ONESAMPLE_STUDENT_MFX = 12, FFF_ONESAMPLE_SIGN_STAT_MFX = 15, FFF_ONESAMPLE_WILCOXON_MFX = 16, FFF_ONESAMPLE_ELR_MFX = 17, FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX = 19 } fff_onesample_stat_flag; /*! \struct fff_onesample_stat \brief General structure for one-sample test statistics */ typedef struct{ fff_onesample_stat_flag flag; /*!< statistic's identifier */ double base; /*!< baseline for mean-value testing */ unsigned int constraint; /* non-zero for statistics computed from maximum likelihood under the null hypothesis */ void* params; /*!< other auxiliary parameters */ double (*compute_stat)(void*, const fff_vector*, double); /*!< actual statistic implementation */ } fff_onesample_stat; /*! \struct fff_onesample_stat_mfx \brief General structure for one-sample test statistics with mixed-effects Tests statistics corrected for mixed effects, i.e. eliminates the influence of heteroscedastic measurement errors. The classical Student statistic is generalized from the likelihood ratio of the model including heteroscedastic first-level errors. More comments to come. */ typedef struct{ fff_onesample_stat_flag flag; /*!< MFX statistic's identifier */ double base; /*!< baseline for mean-value testing */ int empirical; /*!< boolean, tells whether MFX statistic is nonparametric or not */ unsigned int niter; /* non-zero for statistics based on iterative algorithms */ unsigned int constraint; /* non-zero for statistics computed from maximum likelihood under the null hypothesis */ void* params; /*!< auxiliary parameters */ double (*compute_stat)(void*, const fff_vector*, const fff_vector*, double); /*!< actual statistic implementation */ } fff_onesample_stat_mfx; /*! \brief Constructor for the \c fff_onesample_stat structure \param n sample size \param flag statistic identifier \param base baseline value for mean-value testing */ extern fff_onesample_stat* fff_onesample_stat_new(unsigned int n, fff_onesample_stat_flag flag, double base); /*! \brief Destructor for the \c fff_onesample_stat structure \param thisone instance to be deleted */ extern void fff_onesample_stat_delete(fff_onesample_stat* thisone); /*! 
\brief Compute a one-sample test statistic
  \param thisone already created one-sample stat structure
  \param x input vector
*/
extern double fff_onesample_stat_eval(fff_onesample_stat* thisone, const fff_vector* x);

/** MFX **/
extern fff_onesample_stat_mfx* fff_onesample_stat_mfx_new(unsigned int n, fff_onesample_stat_flag flag, double base);
extern void fff_onesample_stat_mfx_delete(fff_onesample_stat_mfx* thisone);
extern double fff_onesample_stat_mfx_eval(fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx);
extern void fff_onesample_stat_mfx_pdf_fit(fff_vector* w, fff_vector* z, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx);
extern void fff_onesample_stat_gmfx_pdf_fit(double* mu, double* v, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx);

/** Sign permutations **/
extern void fff_onesample_permute_signs(fff_vector* xx, const fff_vector* x, double magic);

#ifdef __cplusplus
}
#endif

#endif
nipy-0.6.1/lib/fff/fff_routines.c000066400000000000000000000052141470056100100166350ustar00rootroot00000000000000#include "fff_routines.h"
#include "fff_base.h"
#include <stdio.h>
#include <stdlib.h>

typedef struct{
  double x;
  long i;
} dummy_struct;

static int _dummy_struct_geq(const void * x, const void * y)
{
  int ans = -1;
  dummy_struct xx = *((dummy_struct*)x);
  dummy_struct yy = *((dummy_struct*)y);

  if ( xx.x > yy.x ) {
    ans = 1;
    return ans;
  }
  if ( xx.x == yy.x )
    ans = 0;

  return ans;
}

extern void sort_ascending_and_get_permutation( double* x, long* idx, long n )
{
  long i;
  double *bufx;
  dummy_struct* xx = (dummy_struct*)calloc( n, sizeof(dummy_struct) );
  dummy_struct* buf_xx;
  long* buf_idx;

  /* Copy the values and their original indices into the auxiliary array */
  bufx = x;
  buf_idx = idx;
  buf_xx = xx;
  for ( i=0; i<n; i++, bufx++, buf_xx++ ) {
    (*buf_xx).x = *bufx;
    (*buf_xx).i = i;
  }

  qsort( xx, n, sizeof(dummy_struct), &_dummy_struct_geq );

  /* Copy back the sorted values and the corresponding permutation */
  bufx = x;
  buf_idx = idx;
  buf_xx = xx;
  for ( i=0; i<n; i++, bufx++, buf_idx++, buf_xx++ ) {
    *bufx = (*buf_xx).x;
    *buf_idx = (*buf_xx).i;
  }

  free( xx );
}

static int _double_geq(const void * x, const void * y)
{
  double xx = *((double*)x);
  double yy = *((double*)y);

  if ( xx > yy )
    return 1;
  if ( xx == yy )
    return 0;
  return -1;
}

extern void sort_ascending(double *x, int n)
{
  /* quick and dirty: delegate to qsort */
  qsort( x, n, sizeof(double), &_double_geq );
}

extern long fff_array_argmax1d(const fff_array *farray)
{
  /* returns the index of the max value on a supposedly 1D array
     quick and dirty implementation */
  long i, n = farray->dimX;
  long idx = 0;
  double val, max = (double) fff_array_get1d(farray,idx);

  for (i=0 ; i<n ; i++) {
    val = (double) fff_array_get1d(farray,i);
    if (val>max) {
      max = val;
      idx = i;
    }
  }
  return idx;
}

extern long fff_array_argmin1d(const fff_array *farray)
{
  /* returns the index of the min value on a supposedly 1D array
     quick and dirty implementation */
  long i, n = farray->dimX;
  long idx = 0;
  double val, min = (double) fff_array_get1d(farray,idx);

  for (i=0 ; i<n ; i++) {
    val = (double) fff_array_get1d(farray,i);
    if (val<min) {
      min = val;
      idx = i;
    }
  }
  return idx;
}

extern double fff_array_min1d(const fff_array *farray)
{
  /* returns the min value of a supposedly 1D array */
  long i, n = farray->dimX;
  double val, min = (double) fff_array_get1d(farray,0);

  for (i=0 ; i<n ; i++) {
    val = (double) fff_array_get1d(farray,i);
    if (val<min)
      min = val;
  }
  return min;
}

extern double fff_array_max1d(const fff_array *farray)
{
  /* returns the max value of a supposedly 1D array */
  long i, n = farray->dimX;
  double val, max = (double) fff_array_get1d(farray,0);

  for (i=0 ; i<n ; i++) {
    val = (double) fff_array_get1d(farray,i);
    if (val>max)
      max = val;
  }
  return max;
}
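/* Illustrative usage sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined): sorting a small buffer in place while
   retrieving the permutation of original indices. The function name and
   values are arbitrary. */
#ifdef FFF_EXAMPLES
static void example_sort_with_permutation(void)
{
  double x[4] = {3.0, 1.0, 2.0, 0.5}; /* arbitrary values */
  long idx[4];

  sort_ascending_and_get_permutation(x, idx, 4);
  /* Now x == {0.5, 1.0, 2.0, 3.0} and idx == {3, 1, 2, 0}:
     idx[k] is the original position of the k-th smallest value */
}
#endif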
nipy-0.6.1/lib/fff/fff_routines.h000066400000000000000000000014731470056100100166450ustar00rootroot00000000000000/*!
  \file fff_routines.h
  \brief A few standard functions that are always necessary
  \author Bertrand Thirion and Alexis Roche
  \date 2008

  Things could also be put somewhere else. The implementation often has
  a quick-and-dirty flavour.
*/

#ifndef FFF_ROUTINES
#define FFF_ROUTINES

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdlib.h>
#include "fff_array.h"
#include "fff_matrix.h"

extern void sort_ascending_and_get_permutation( double* x, long* idx, long n );
extern void sort_ascending(double *x, int n);
extern long fff_array_argmax1d(const fff_array *farray);
extern long fff_array_argmin1d(const fff_array *farray);
extern double fff_array_min1d(const fff_array *farray);
extern double fff_array_max1d(const fff_array *farray);

#ifdef __cplusplus
}
#endif

#endif
nipy-0.6.1/lib/fff/fff_specfun.c000066400000000000000000000030751470056100100164330ustar00rootroot00000000000000/* Special functions for FFF.
 * Author: Gael Varoquaux (implemented from canonical sources:
 *    log gamma: algorithm as described in numerical recipes
 *    psi : algorithm as described in Applied Statistics,
 *          Volume 25, Number 3, 1976, pages 315-317.
 *
 * License: BSD
 */

#include "fff_specfun.h"
#include <math.h>

double fff_gamln(double x)
{
  /* Log Gamma.
   *
   * INPUT: x > 0
   */
  double coeff[] = {
    76.18009172947146,
    -86.50532032941677,
    24.01409824083091,
    -1.231739572450155,
    .1208650973866179e-2,
    -.5395239384953e-5 };
  const double stp = 2.5066282746310005;
  double y = x;
  double sum = 1.000000000190015;
  double out;
  int i;

  for(i=0; i<6; i++) {
    y += 1;
    sum += coeff[i]/y;
  }
  out = x + 5.5;
  out = (x+0.5) * log(out) - out;

  return out + log(stp*sum/x);
}

double fff_psi(double x)
{
  /* psi: d gamln(x)/dx
   *
   * INPUT: x > 0
   */
  double c = 8.5;
  double d1 = -0.5772156649;
  double r;
  double s = 0.00001;
  double s3 = 0.08333333333;
  double s4 = 0.0083333333333;
  double s5 = 0.003968253968;
  double out;
  double y;

  /* XXX: What if x < 0 ? */
  y = x;
  out = 0.0;

  /* Use approximation if argument <= s */
  if (y <= s) {
    out = d1 - 1.0 / y;
    return out;
  }

  /* Reduce to psi(x + n) where (x + n) >= c */
  while (y < c) {
    out -= 1.0 / y;
    y += 1.0;
  }

  /* Use the asymptotic expansion once the argument is > c */
  r = 1.0 / y;
  out += log(y) - 0.5*r;
  r = r*r;
  out += -r*(s3 - r*(s4 - r*s5));

  return out;
}
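/* Illustrative check (editor's example, compiled out since FFF_EXAMPLES
   is never defined): known values of the two special functions above,
   fff_gamln(5) = log(4!) = log 24 ~= 3.1781, and fff_psi(1) = -gamma
   (the Euler-Mascheroni constant) ~= -0.5772. */
#ifdef FFF_EXAMPLES
static void example_specfun(void)
{
  double a = fff_gamln(5.0); /* ~= 3.1781 */
  double b = fff_psi(1.0);   /* ~= -0.5772 */
  (void)a;
  (void)b;
}
#endif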
allocate memory", ENOMEM); return NULL; } thisone->n1 = n1; thisone->n2 = n2; thisone->flag = flag; thisone->niter = 0; switch (flag) { case FFF_TWOSAMPLE_STUDENT_MFX: thisone->compute_stat = &_fff_twosample_student_mfx; aux = (fff_twosample_mfx*)malloc(sizeof(fff_twosample_mfx)); thisone->params = (void*)aux; aux->em = fff_glm_twolevel_EM_new(n, 2); aux->niter = &(thisone->niter); aux->work = fff_vector_new(n); aux->X = fff_matrix_new(n, 2); aux->PX = fff_matrix_new(2, n); aux->PPX = fff_matrix_new(2, n); _fff_twosample_mfx_assembly(aux->X, aux->PX, aux->PPX, n1, n2); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } return thisone; } void fff_twosample_stat_mfx_delete(fff_twosample_stat_mfx* thisone) { fff_twosample_mfx* aux; if (thisone == NULL) return; switch (thisone->flag) { case FFF_TWOSAMPLE_STUDENT_MFX: aux = (fff_twosample_mfx*) thisone->params; fff_vector_delete(aux->work); fff_matrix_delete(aux->X); fff_matrix_delete(aux->PX); fff_matrix_delete(aux->PPX); fff_glm_twolevel_EM_delete(aux->em); free(aux); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } free(thisone); return; } double fff_twosample_stat_mfx_eval(fff_twosample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx) { double t; t = thisone->compute_stat(thisone->params, x, vx, thisone->n1); return t; } /********************************************************************* Actual test statistic implementation **********************************************************************/ static double _fff_twosample_student(void* params, const fff_vector* x, unsigned int n1) { fff_vector x1, x2; unsigned int naux = x->size-n1; double t, m1, m2; long double v1, aux; /* Compute within-group means and variances */ x1 = fff_vector_view(x->data, n1, x->stride); x2 = fff_vector_view(x->data+n1, naux, x->stride); v1 = fff_vector_ssd(&x1, &m1, 0); aux = fff_vector_ssd(&x2, &m2, 0); /* Compute max( n1+n2-2, 1 ) */ naux += n1-2; if (naux<=0) naux = 1; /* Compute the inverse std estimate */ aux += v1; aux /= naux; aux = sqrt(aux); if (aux<=0.0) aux = FFF_POSINF; else aux = 1/aux; /* t value */ t = (m1-m2)*aux; return t; } /* Wilcoxon. */ static double _fff_twosample_wilcoxon(void* params, const fff_vector* x, unsigned int n1) { fff_vector x1, x2; unsigned int i, j, n2=x->size-n1; double w=0.0, aux; double *b1, *b2; x1 = fff_vector_view(x->data, n1, x->stride); x2 = fff_vector_view(x->data+n1, n2, x->stride); for(i=0, b1=x1.data; i *b2) aux += 1.0; else if (*b2 > *b1) aux -= 1.0; } aux /= (double)n2; w += aux; } return w; } /* Pre-compute matrices for two-sample mixed-effect linear analysis. X has two columns: c0 = [1 1 ... 1]' and c1 = [1 ... 1 | 0 ... 
/* Pre-compute matrices for two-sample mixed-effect linear analysis.
   X has two columns: c0 = [1 1 ... 1]' and c1 = [1 ... 1 | 0 ... 0]' */
static void _fff_twosample_mfx_assembly(fff_matrix* X, fff_matrix* PX, fff_matrix* PPX,
                                        unsigned int n1, unsigned int n2)
{
  unsigned int n = n1+n2;
  double g1=1/(double)n1, g2=1/(double)n2;
  fff_matrix B;

  /* X */
  fff_matrix_set_all(X, 1.0);
  B = fff_matrix_block(X, n1, n2, 1, 1);
  fff_matrix_set_all(&B, 0.0);

  /* PX */
  B = fff_matrix_block(PX, 0, 1, 0, n1);
  fff_matrix_set_all(&B, 0.0);
  B = fff_matrix_block(PX, 0, 1, n1, n2);
  fff_matrix_set_all(&B, g2);
  B = fff_matrix_block(PX, 1, 1, 0, n1);
  fff_matrix_set_all(&B, g1);
  B = fff_matrix_block(PX, 1, 1, n1, n2);
  fff_matrix_set_all(&B, -g2);

  /* PPX */
  B = fff_matrix_block(PPX, 0, 1, 0, n);
  fff_matrix_set_all(&B, 1.0/(double)n);
  B = fff_matrix_block(PPX, 1, 1, 0, n);
  fff_matrix_set_all(&B, 0.0);

  return;
}

static double _fff_twosample_student_mfx(void* params, const fff_vector* x,
                                         const fff_vector* vx, unsigned int n1)
{
  fff_twosample_mfx* Params = (fff_twosample_mfx*)params;
  double F, sign, ll, ll0;
  unsigned int niter = *(Params->niter);

  /* Constrained EM */
  fff_glm_twolevel_EM_init(Params->em);
  fff_glm_twolevel_EM_run(Params->em, x, vx, Params->X, Params->PPX, niter);
  ll0 = fff_glm_twolevel_log_likelihood(x, vx, Params->X, Params->em->b,
                                        Params->em->s2, Params->work);

  /* Unconstrained EM initialized with constrained maximization results */
  fff_glm_twolevel_EM_run(Params->em, x, vx, Params->X, Params->PX, niter);
  ll = fff_glm_twolevel_log_likelihood(x, vx, Params->X, Params->em->b,
                                       Params->em->s2, Params->work);

  /* Form the generalized F statistic */
  F = 2.0*(ll-ll0);
  F = FFF_MAX(F, 0.0); /* Just to make sure */
  sign = Params->em->b->data[1]; /* Contiguity ensured */
  sign = FFF_SIGN(sign);

  return sign*sqrt(F);
}

/*********************************************************************
			   Permutations
**********************************************************************/

unsigned int fff_twosample_permutation(unsigned int* idx1, unsigned int* idx2,
                                       unsigned int n1, unsigned int n2, double* magic)
{
  unsigned int n=FFF_MIN(n1, n2), i;
  double aux, magic1, magic2, cuml=0, cumr=1, c1=1, c2=1;

  /* Pre-computation mode */
  if ( (idx1==NULL) || (idx2==NULL) )
    *magic = FFF_POSINF;

  /* Find i such that Cn1,i*Cn2,i <= magic < Cn1,i*Cn2,i + Cn1,i+1*Cn2,i+1 */
  for(i=0; i<=n; i++) {
    /* Downshift the magic number on exit */
    if (*magic < cumr) {
      *magic -= cuml;
      break;
    }
    /* Update the binomial coefficients c1 = C(n1,i+1), c2 = C(n2,i+1)
       and the cumulative count */
    cuml = cumr;
    aux = (double)(i+1);
    c1 *= (n1-i)/aux;
    c2 *= (n2-i)/aux;
    cumr += c1*c2;
  }

  /* Total number of permutations reached (pre-computation mode) */
  if (*magic >= cumr) { /* AR,27/2/09 modified without certainty from *magic > cumr */
    *magic = cumr;
    return 0;
  }

  /* Compute magic numbers for within-group combinations.
     We use: magic = magic2*c1 + magic1 */
  magic2 = floor(*magic/c1);
  magic1 = *magic - magic2*c1;

  /* Find the underlying combinations */
  fff_combination(idx1, i, n1, magic1);
  fff_combination(idx2, i, n2, magic2);

  return i;
}
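/* Illustrative usage sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined): a first call with NULL index buffers
   stores the total permutation count in magic; subsequent calls with
   magic = 0, 1, ... each decode one permutation as the number i of
   exchanged pairs plus the index sets idx1/idx2, whose capacity should
   be FFF_MIN(n1,n2). */
#ifdef FFF_EXAMPLES
static void example_enumerate_permutations(unsigned int n1, unsigned int n2,
                                           unsigned int* idx1, unsigned int* idx2)
{
  double total = 0, magic;
  unsigned int i, p;

  fff_twosample_permutation(NULL, NULL, n1, n2, &total);
  for (p=0; p<(unsigned int)total; p++) {
    magic = (double)p;
    i = fff_twosample_permutation(idx1, idx2, n1, n2, &magic);
    /* exchange the i pairs listed in idx1/idx2, e.g. via
       fff_twosample_apply_permutation */
    (void)i;
  }
}
#endif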
/* px assumed allocated n1 + n2 */
#define SWAP(a, b)				\
  aux = a;					\
  a = b;					\
  b = aux

void fff_twosample_apply_permutation(fff_vector* px, fff_vector* pv,
                                     const fff_vector* x1, const fff_vector* v1,
                                     const fff_vector* x2, const fff_vector* v2,
                                     unsigned int i,
                                     const unsigned int* idx1, const unsigned int* idx2)
{
  unsigned int j;
  size_t i1, i2, n1=x1->size, n2=x2->size;
  double aux;
  double *bpx1, *bpx2;
  fff_vector px1, px2, pv1, pv2;
  int flag_mfx = (pv!=NULL);

  /* Copy input vectors into single output vector */
  px1 = fff_vector_view(px->data, n1, px->stride);
  fff_vector_memcpy(&px1, x1);
  px2 = fff_vector_view(px->data + n1, n2, px->stride);
  fff_vector_memcpy(&px2, x2);
  if (flag_mfx) {
    pv1 = fff_vector_view(pv->data, n1, pv->stride);
    fff_vector_memcpy(&pv1, v1);
    pv2 = fff_vector_view(pv->data + n1, n2, pv->stride);
    fff_vector_memcpy(&pv2, v2);
  }

  /* Exchange elements */
  for(j=0; j<i; j++) {
    i1 = idx1[j];
    i2 = idx2[j];
    bpx1 = px1.data + i1*px->stride;
    bpx2 = px2.data + i2*px->stride;
    SWAP(*bpx1, *bpx2);
    if (flag_mfx) {
      bpx1 = pv1.data + i1*pv->stride;
      bpx2 = pv2.data + i2*pv->stride;
      SWAP(*bpx1, *bpx2);
    }
  }

  return;
}
nipy-0.6.1/lib/fff/fff_twosample_stat.h000066400000000000000000000047641470056100100200470ustar00rootroot00000000000000/*!
  \file fff_twosample_stat.h
  \brief Two-sample test statistics
  \author Alexis Roche
  \date 2008
*/

#ifndef FFF_TWOSAMPLE_STAT
#define FFF_TWOSAMPLE_STAT

#ifdef __cplusplus
extern "C" {
#endif

#include "fff_vector.h"

/* Two-sample stat flag */
typedef enum {
  FFF_TWOSAMPLE_STUDENT = 2,
  FFF_TWOSAMPLE_WILCOXON = 6,
  FFF_TWOSAMPLE_STUDENT_MFX = 12
} fff_twosample_stat_flag;

/*!
  \struct fff_twosample_stat
  \brief General structure for two-sample test statistics
*/
typedef struct{
  unsigned int n1; /*!< number of subjects in first group */
  unsigned int n2; /*!< number of subjects in second group */
  fff_twosample_stat_flag flag; /*!< statistic's identifier */
  void* params;
  double (*compute_stat)(void*, const fff_vector*, unsigned int); /*!< actual statistic implementation */
} fff_twosample_stat;

extern fff_twosample_stat* fff_twosample_stat_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag);
extern void fff_twosample_stat_delete(fff_twosample_stat* thisone);
extern double fff_twosample_stat_eval(fff_twosample_stat* thisone, const fff_vector* x);

/** MFX **/

/*!
  \struct fff_twosample_stat_mfx
  \brief General structure for two-sample test statistics with mixed effects
*/
typedef struct{
  unsigned int n1; /*!< number of subjects in first group */
  unsigned int n2; /*!< number of subjects in second group */
  fff_twosample_stat_flag flag; /*!< statistic's identifier */
  unsigned int niter;
  void* params; /*!< auxiliary structures */
  double (*compute_stat)(void*, const fff_vector*, const fff_vector*, unsigned int); /*!< actual statistic implementation */
} fff_twosample_stat_mfx;

extern fff_twosample_stat_mfx* fff_twosample_stat_mfx_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag);
extern void fff_twosample_stat_mfx_delete(fff_twosample_stat_mfx* thisone);
extern double fff_twosample_stat_mfx_eval(fff_twosample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx);

/** Label permutations **/
extern unsigned int fff_twosample_permutation(unsigned int* idx1, unsigned int* idx2, unsigned int n1, unsigned int n2, double* magic);
extern void fff_twosample_apply_permutation(fff_vector* px, fff_vector* pv, const fff_vector* x1, const fff_vector* v1, const fff_vector* x2, const fff_vector* v2, unsigned int i, const unsigned int* idx1, const unsigned int* idx2);

#ifdef __cplusplus
}
#endif

#endif
nipy-0.6.1/lib/fff/fff_vector.c000066400000000000000000000254441470056100100162760ustar00rootroot00000000000000#include "fff_base.h"
#include "fff_vector.h"
#include "fff_array.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <errno.h>

/* Declaration of static functions */
static double _fff_pth_element(double* x, size_t p, size_t stride, size_t size);
static void _fff_pth_interval(double* am, double* aM, double* x, size_t p, size_t stride, size_t size);

/* Constructor */
fff_vector* fff_vector_new(size_t size)
{
  fff_vector* thisone;

  thisone = (fff_vector*)calloc(1, sizeof(fff_vector));
  if (thisone == NULL) {
    FFF_ERROR("Allocation failed", ENOMEM);
    return NULL;
  }

  thisone->data = (double*)calloc(size, sizeof(double));
  if (thisone->data == NULL)
    FFF_ERROR("Allocation failed", ENOMEM);

  thisone->size = size;
  thisone->stride = 1;
  thisone->owner = 1;

  return thisone;
}

/* Destructor */
void fff_vector_delete(fff_vector* thisone)
{
  if (thisone->owner)
    if (thisone->data != NULL)
      free(thisone->data);
  free(thisone);

  return;
}

/* View */
fff_vector fff_vector_view(const double* data, size_t size, size_t stride)
{
  fff_vector x;

  x.size = size;
  x.stride = stride;
  x.owner = 0;
  x.data = (double*)data;

  return x;
}

#define CHECK_SIZE(x,y)							\
  if ((x->size) != (y->size)) FFF_ERROR("Vectors have different sizes", EDOM)
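/* Illustrative usage sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined): building a vector element-wise and
   querying order statistics. Both fff_vector_median and
   fff_vector_quantile, defined below, rearrange the underlying data in
   place. */
#ifdef FFF_EXAMPLES
static void example_vector_basics(void)
{
  fff_vector* x = fff_vector_new(4);
  double med, q;

  fff_vector_set(x, 0, 3.0);
  fff_vector_set(x, 1, 1.0);
  fff_vector_set(x, 2, 4.0);
  fff_vector_set(x, 3, 1.5);

  q = fff_vector_quantile(x, 0.75, 1); /* interpolated upper quartile */
  med = fff_vector_median(x);          /* .5*(1.5+3.0) == 2.25 */

  fff_vector_delete(x);
  (void)med;
  (void)q;
}
#endif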
/* Vector copy. If both vectors are contiguous in memory, we use memcpy,
   otherwise we perform a loop */
void fff_vector_memcpy(fff_vector* x, const fff_vector* y)
{
  CHECK_SIZE(x, y);
  if ((x->stride == 1) && (y->stride == 1))
    memcpy((void*)x->data, (void*)y->data, x->size*sizeof(double));
  else {
    size_t i;
    double *bx, *by;
    for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride)
      *bx = *by;
  }

  return;
}

/* Copy buffer with arbitrary type */
void fff_vector_fetch(fff_vector* x, const void* data, fff_datatype datatype, size_t stride)
{
  fff_array a = fff_array_view1d(datatype, (void*)data, x->size, stride);
  fff_array b = fff_array_view1d(FFF_DOUBLE, x->data, x->size, x->stride);

  fff_array_copy(&b, &a);

  return;
}

/* Get an element */
double fff_vector_get(const fff_vector * x, size_t i)
{
  return(x->data[ i * x->stride ]);
}

/* Set an element */
void fff_vector_set(fff_vector * x, size_t i, double a)
{
  x->data[ i * x->stride ] = a;
  return;
}

/* Set all elements */
void fff_vector_set_all(fff_vector * x, double a)
{
  size_t i;
  double *buf;

  for(i=0, buf=x->data; i<x->size; i++, buf+=x->stride)
    *buf = a;

  return;
}

/* Add two vectors */
void fff_vector_add(fff_vector * x, const fff_vector * y)
{
  size_t i;
  double *bx, *by;

  CHECK_SIZE(x, y);
  for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride)
    *bx += *by;

  return;
}

/* Compute: x = x - y */
void fff_vector_sub(fff_vector * x, const fff_vector * y)
{
  size_t i;
  double *bx, *by;

  CHECK_SIZE(x, y);
  for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride)
    *bx -= *by;

  return;
}

/* Element-wise product */
void fff_vector_mul(fff_vector * x, const fff_vector * y)
{
  size_t i;
  double *bx, *by;

  CHECK_SIZE(x, y);
  for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride)
    *bx *= *by;

  return;
}

/* Element-wise division */
void fff_vector_div(fff_vector * x, const fff_vector * y)
{
  size_t i;
  double *bx, *by;

  CHECK_SIZE(x, y);
  for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride)
    *bx /= *by;

  return;
}

/* Scale by a constant */
void fff_vector_scale(fff_vector * x, double a)
{
  size_t i;
  double *bx;

  for(i=0, bx=x->data; i<x->size; i++, bx+=x->stride)
    *bx *= a;

  return;
}

/* Add a constant */
void fff_vector_add_constant(fff_vector * x, double a)
{
  size_t i;
  double *bx;

  for(i=0, bx=x->data; i<x->size; i++, bx+=x->stride)
    *bx += a;

  return;
}

/* Sum up elements */
long double fff_vector_sum(const fff_vector* x)
{
  long double sum = 0.0;
  double* buf = x->data;
  size_t i;

  for(i=0; i<x->size; i++, buf+=x->stride)
    sum += *buf;

  return sum;
}

/* Mean */
double fff_vector_mean(const fff_vector* x)
{
  return((double)(fff_vector_sum(x) / (double)x->size));
}
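/* Illustrative sketch (editor's example, compiled out since
   FFF_EXAMPLES is never defined) of the fff_vector_ssd calling
   convention defined below: with fixed_offset=0 the offset *m is
   overwritten with the sample mean; with fixed_offset=1 it is read as a
   fixed offset a, and the result follows the Konig identity
   SUM[(x-a)^2] = SUM[(x-m)^2] + n*(a-m)^2. */
#ifdef FFF_EXAMPLES
static void example_vector_ssd(const fff_vector* x)
{
  double m, a = 0.0; /* a: arbitrary fixed offset */
  long double ssd_mean, ssd_zero;

  ssd_mean = fff_vector_ssd(x, &m, 0); /* m <- sample mean */
  ssd_zero = fff_vector_ssd(x, &a, 1); /* offset held fixed at 0 */
  /* ssd_zero == ssd_mean + n*(a-m)^2, up to rounding */
  (void)ssd_mean;
  (void)ssd_zero;
}
#endif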
/* SSD

   We use Konig formula:

   SUM[(x-a)^2] = SUM[(x-m)^2] + n*(a-m)^2

   where m is the mean. */
long double fff_vector_ssd(const fff_vector* x, double* m, int fixed_offset)
{
  long double ssd = 0.0;
  long double sum = 0.0;
  long double n = (long double)x->size;
  double aux;
  double* buf = x->data;
  size_t i;

  for(i=0; i<x->size; i++, buf+=x->stride) {
    aux = *buf;
    sum += aux;
    ssd += FFF_SQR(aux);
  }
  sum /= n;

  if (fixed_offset) {
    aux = *m - sum;
    ssd += n * (FFF_SQR(aux) - FFF_SQR(sum));
  }
  else {
    *m = sum;
    ssd -= n * FFF_SQR(sum);
  }

  return ssd;
}

long double fff_vector_wsum(const fff_vector* x, const fff_vector* w, long double* sumw)
{
  long double wsum=0.0, aux=0.0;
  double *bufx=x->data, *bufw=w->data;
  size_t i;

  CHECK_SIZE(x, w);
  for(i=0; i<x->size; i++, bufx+=x->stride, bufw+=w->stride) {
    wsum += (*bufw) * (*bufx);
    aux += *bufw;
  }
  *sumw = aux;

  return wsum;
}

long double fff_vector_sad(const fff_vector* x, double m)
{
  long double sad=0.0;
  double aux;
  double *buf=x->data;
  size_t i;

  for(i=0; i<x->size; i++, buf+=x->stride) {
    aux = *buf-m;
    sad += FFF_ABS(aux);
  }

  return sad;
}

/* Median (modifies the input vector) */
double fff_vector_median(fff_vector* x)
{
  double m;
  double* data = x->data;
  size_t stride = x->stride, size = x->size;

  if (FFF_IS_ODD(size))
    m = _fff_pth_element(data, size>>1, stride, size);
  else {
    double mm;
    _fff_pth_interval(&m, &mm, data, (size>>1)-1, stride, size);
    m = .5*(m+mm);
  }

  return m;
}

/* Quantile.

   Given a sample x, this function computes a value q so that the number
   of sample values that are greater or equal to q is smaller or equal
   to (1-r) * sample size. */
double fff_vector_quantile(fff_vector* x, double r, int interp)
{
  double m, pp;
  double* data = x->data;
  size_t p, stride = x->stride, size = x->size;

  if ((r<0) || (r>1)) {
    FFF_WARNING("Ratio must be in [0,1], returning zero");
    return 0.0;
  }

  if (size == 1)
    return data[0];

  /* Find the smallest index p so that p >= r * size */
  if (!interp) {
    pp = r * size;
    p = FFF_UNSIGNED_CEIL(pp);
    if (p == size)
      return FFF_POSINF;
    m = _fff_pth_element(data, p, stride, size);
  }
  else {
    double wm, wM;
    pp = r * (size-1);
    p = FFF_UNSIGNED_FLOOR(pp);
    wM = pp - (double)p;
    wm = 1.0 - wM;
    if (wM <= 0)
      m = _fff_pth_element(data, p, stride, size);
    else {
      double am, aM;
      _fff_pth_interval(&am, &aM, data, p, stride, size);
      m = wm*am + wM*aM;
    }
  }

  return m;
}

/*** STATIC FUNCTIONS ***/

/* BEWARE: the input array x gets modified! */
/* Pick up the sample value a so that:
   (p+1) sample values are <= a AND the remaining sample values are >= a */
#define SWAP(a, b) {tmp=(a); (a)=(b); (b)=tmp;}
static double _fff_pth_element(double* x, size_t p, size_t stride, size_t n)
{
  double a, tmp;
  double *bufl, *bufr;
  size_t i, j, il, jr, stop1, stop2;
  int same_extremities;

  stop1 = 0;
  il = 0;
  jr = n-1;
  while (stop1 == 0) {

    same_extremities = 0;
    bufl = x + stride*il;
    bufr = x + stride*jr;
    if (*bufl > *bufr)
      SWAP(*bufl, *bufr)
    else if (*bufl == *bufr)
      same_extremities = 1;
    a = *bufl;

    if (il == jr)
      return a;

    bufl += stride;
    i = il + 1;
    j = jr;

    stop2 = 0;
    while (stop2 == 0) {

      while (*bufl < a) {
        i ++;
        bufl += stride;
      }
      while (*bufr > a) {
        j --;
        bufr -= stride;
      }

      if (j <= i)
        stop2 = 1;
      else {
        SWAP(*bufl, *bufr)
        j --; bufr -= stride;
        i ++; bufl += stride;
      }

      /* Avoids infinite loops in samples with redundant values.
This situation can only occur with i == j */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop2 = 1; } } /* At this point, we know that il <= j <= i; moreover: if k <= j, x(j) <= a and if k > j, x(j) >= a if k < i, x(i) <= a and if k >= i, x(i) >= a We hence have: (j+1) values <= a and the remaining (n-j-1) >= a i values <= a and the remaining (n-i) >= a */ if (j > p) jr = j; else if (j < p) il = i; else /* j == p */ stop1 = 1; } return a; } /* BEWARE: the input array x gets modified! */ static void _fff_pth_interval(double* am, double* aM, double* x, size_t p, size_t stride, size_t n) { double a, tmp; double *bufl, *bufr; size_t i, j, il, jr, stop1, stop2, stop3; size_t pp = p+1; int same_extremities = 0; *am = 0.0; *aM = 0.0; stop1 = 0; stop2 = 0; il = 0; jr = n-1; while ((stop1 == 0) || (stop2 == 0)) { same_extremities = 0; bufl = x + stride*il; bufr = x + stride*jr; if (*bufl > *bufr) SWAP(*bufl, *bufr) else if (*bufl == *bufr) same_extremities = 1; a = *bufl; if (il == jr) { *am=a; *aM=a; return; } bufl += stride; i = il + 1; j = jr; stop3 = 0; while (stop3 == 0) { while (*bufl < a) { i ++; bufl += stride; } while (*bufr > a) { j --; bufr -= stride; } if (j <= i) stop3 = 1; else { SWAP(*bufl, *bufr) j --; bufr -= stride; i ++; bufl += stride; } /* Avoids infinite loops in samples with redundant values */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop3 = 1; } } /* At this point, we know that there are (j+1) datapoints <=a including a itself, and another (n-j-1) datapoints >=a */ if (j > pp) jr = j; else if (j < p) il = i; /* Case: found percentile at p */ else if (j == p) { il = i; *am = a; stop1 = 1; } /* Case: found percentile at (p+1), ie j==(p+1) */ else { jr = j; *aM = a; stop2 = 1; } } return; } /* Sort x by ascending order and reorder w accordingly. */ double fff_vector_wmedian_from_sorted_data (const fff_vector* x_sorted, const fff_vector* w) { size_t i; double mu, sumW, WW, WW_prev, xx, xx_prev, ww; double *bxx, *bww; /* Compute the sum of weights */ sumW = (double) fff_vector_sum(w); if (sumW <= 0.0) return FFF_NAN; /* Find the smallest index such that the cumulative density > 0.5 */ i = 0; xx = FFF_NEGINF; WW = 0.0; bxx = x_sorted->data; bww = w->data; while (WW <= .5) { xx_prev = xx; WW_prev = WW; xx = *bxx; ww = *bww / sumW; WW += ww; i ++; bxx += x_sorted->stride; bww += w->stride; } /* Linearly interpolated median */ if (i == 1) mu = xx; else mu = .5*(xx_prev+xx) + (.5-WW_prev)*(xx-xx_prev)/ww; return mu; } nipy-0.6.1/lib/fff/fff_vector.h000066400000000000000000000115041470056100100162730ustar00rootroot00000000000000/*! \file fff_vector.h \brief fff vector object \author Alexis Roche \date 2003-2008 */ #ifndef FFF_VECTOR #define FFF_VECTOR #ifdef __cplusplus extern "C" { #endif #include "fff_base.h" #include /*! \struct fff_vector \brief The fff vector structure */ typedef struct { size_t size; size_t stride; double* data; int owner; } fff_vector; /*! \brief fff vector constructor \param size vector size */ extern fff_vector* fff_vector_new(size_t size); /*! \brief fff vector destructor \param thisone instance to delete */ extern void fff_vector_delete(fff_vector* thisone); /*! \brief Vector view \param data data array \param size array size \param stride array stride */ extern fff_vector fff_vector_view(const double* data, size_t size, size_t stride); /*! \brief Get an element \param x vector \param i index */ extern double fff_vector_get (const fff_vector * x, size_t i); /*! 
\brief Set an element \param x vector \param i index \param a value to set */ extern void fff_vector_set (fff_vector * x, size_t i, double a); /*! \brief Set all elements to a constant value \param x vector \param a value to set */ extern void fff_vector_set_all (fff_vector * x, double a); extern void fff_vector_scale (fff_vector * x, double a); extern void fff_vector_add_constant (fff_vector * x, double a); /*! \brief Copy a vector \param x input vector \param y output vector */ extern void fff_vector_memcpy( fff_vector* x, const fff_vector* y ); /*! \brief view or copy an existing buffer \param x destination vector \param data pre-allocated buffer \param datatype data type \param stride stride in relative units (1 means contiguous array) */ extern void fff_vector_fetch(fff_vector* x, const void* data, fff_datatype datatype, size_t stride); /*! \brief Add two vectors \param x output vector \param y constant vector */ extern void fff_vector_add (fff_vector * x, const fff_vector * y); /*! \brief Compute the difference x-y \param x output vector \param y constant vector */ extern void fff_vector_sub (fff_vector * x, const fff_vector * y); extern void fff_vector_mul (fff_vector * x, const fff_vector * y); extern void fff_vector_div (fff_vector * x, const fff_vector * y); /*! \brief Sum up vector elements \param x input vector */ extern long double fff_vector_sum( const fff_vector* x ); /*! \brief Sum of squared differences \param x input vector \param m offset value, either fixed or set to the mean \param fixed_offset true if the offset is to be held fixed Compute the sum: \f$ \sum_i (x_i-a)^2 \f$ where \a a is a given offset. */ extern long double fff_vector_ssd( const fff_vector* x, double* m, int fixed_offset ); extern long double fff_vector_wsum( const fff_vector* x, const fff_vector* w, long double* sumw ); extern long double fff_vector_sad( const fff_vector* x, double m ); /*! \brief Fast median from non-const vector \param x input vector Beware that the input array is re-arranged. This function does not require the input array to be sorted in ascending order. It deals itself with sorting the data, and this is done in a partial way, yielding a faster algorithm. */ extern double fff_vector_median( fff_vector* x ); /*! \brief Sample percentile, or quantile from non-const array \param input vector \param r value between 0 and 1 \param interp interpolation flag If \c interp is \c FALSE, this function returns the smallest sample value \a q that is greater than or equal to a proportion \a r of all sample values; more precisely, the number of sample values that are greater or equal to \a q is smaller or equal to \a (1-r) times the sample size. If \c interp is \c TRUE, then the quantile is defined from a linear interpolation of the empirical cumulative distribution. For instance, if \a r = 0.5 and \c interp = \c TRUE, \a q is the usual median; the \c interp flag does not play any role if the sample size is odd. Similarly to \c fff_median_from_temp_data, the array elements are re-arranged. */ extern double fff_vector_quantile( fff_vector* x, double r, int interp ); /*! \brief Weighted median \param x already sorted data \param w weight vector Compute the weighted median of \c x_sorted using the weights in \c w, assuming the elements in \c x_sorted are in ascending order. Notice, the function does not check for negative weights; if the weights sum up to a negative value, \c FFF_NAN is returned. 
*/ extern double fff_vector_wmedian_from_sorted_data ( const fff_vector* x_sorted, const fff_vector* w ); #ifdef __cplusplus } #endif #endif nipy-0.6.1/lib/fff/meson.build000066400000000000000000000004231470056100100161370ustar00rootroot00000000000000fff = files( 'fff_array.c', 'fff_base.c', 'fff_blas.c', 'fff_gen_stats.c', 'fff_glm_kalman.c', 'fff_glm_twolevel.c', 'fff_lapack.c', 'fff_matrix.c', 'fff_onesample_stat.c', 'fff_routines.c', 'fff_specfun.c', 'fff_twosample_stat.c', 'fff_vector.c' ) nipy-0.6.1/lib/fff_python_wrapper/000077500000000000000000000000001470056100100171375ustar00rootroot00000000000000nipy-0.6.1/lib/fff_python_wrapper/fff.pxd000066400000000000000000000160721470056100100204230ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough # :Author: Alexis Roche # Include numpy defines via Cython from numpy cimport ndarray, import_array, npy_intp # Redefine size_t ctypedef unsigned long int size_t # Exports from fff_base.h cdef extern from "fff_base.h": ctypedef enum fff_datatype: FFF_UNKNOWN_TYPE = -1, FFF_UCHAR = 0, FFF_SCHAR = 1, FFF_USHORT = 2, FFF_SSHORT = 3, FFF_UINT = 4, FFF_INT = 5, FFF_ULONG = 6, FFF_LONG = 7, FFF_FLOAT = 8, FFF_DOUBLE = 9 unsigned int fff_nbytes(fff_datatype type) # Exports from fff_vector.h cdef extern from "fff_vector.h": ctypedef struct fff_vector: size_t size size_t stride int owner double* data fff_vector* fff_vector_new(size_t n) void fff_vector_delete(fff_vector* x) fff_vector fff_vector_view(double* data, size_t size, size_t stride) double fff_vector_get(fff_vector * x, size_t i) void fff_vector_set(fff_vector * x, size_t i, double a) void fff_vector_set_all(fff_vector * x, double a) void fff_vector_scale(fff_vector * x, double a) void fff_vector_add_constant(fff_vector * x, double a) void fff_vector_memcpy(fff_vector* x, fff_vector* y) void fff_vector_fetch(fff_vector* x, void* data, fff_datatype datatype, size_t stride) void fff_vector_add(fff_vector * x, fff_vector * y) void fff_vector_sub(fff_vector * x, fff_vector * y) void fff_vector_mul(fff_vector * x, fff_vector * y) void fff_vector_div(fff_vector * x, fff_vector * y) long double fff_vector_sum(fff_vector* x) long double fff_vector_ssd(fff_vector* x, double* m, int fixed) long double fff_vector_sad(fff_vector* x, double m) double fff_vector_median(fff_vector* x) double fff_vector_quantile(fff_vector* x, double r, int interp) double fff_vector_wmedian_from_sorted_data(fff_vector* x_sorted, fff_vector* w) # Exports from fff_matrix.h cdef extern from "fff_matrix.h": ctypedef struct fff_matrix: size_t size1 size_t size2 size_t tda int owner double* data fff_matrix* fff_matrix_new(size_t nr, size_t nc) void fff_matrix_delete(fff_matrix* A) fff_matrix fff_matrix_view(double* data, size_t size1, size_t size2, size_t tda) double fff_matrix_get(fff_matrix* A, size_t i, size_t j) void fff_matrix_set_all(fff_matrix * A, double a) void fff_matrix_scale(fff_matrix * A, double a) void fff_matrix_add_constant(fff_matrix * A, double a) void fff_matrix_get_row(fff_vector * x, fff_matrix * A, size_t i) fff_matrix_get_col(fff_vector * x, fff_matrix * A, size_t j) fff_matrix_get_diag(fff_vector * x, fff_matrix * A) fff_matrix_set_row(fff_matrix * A, size_t i, fff_vector * x) fff_matrix_set_col(fff_matrix * A, size_t j, fff_vector * x) fff_matrix_set_diag(fff_matrix * A, fff_vector * x) void fff_matrix_transpose(fff_matrix* A, fff_matrix* B) void fff_matrix_memcpy(fff_matrix* A, fff_matrix* B) fff_matrix fff_matrix_view(double* data, size_t size1, size_t size2, 
size_t tda) void fff_matrix_add (fff_matrix * A, fff_matrix * B) void fff_matrix_sub (fff_matrix * A, fff_matrix * B) void fff_matrix_mul_elements (fff_matrix * A, fff_matrix * B) void fff_matrix_div_elements (fff_matrix * A, fff_matrix * B) # Exports from fff_array.h cdef extern from "fff_array.h": ctypedef enum fff_array_ndims: FFF_ARRAY_1D = 1, FFF_ARRAY_2D = 2, FFF_ARRAY_3D = 3, FFF_ARRAY_4D = 4 ctypedef struct fff_array: fff_array_ndims ndims fff_datatype datatype size_t dimX size_t dimY size_t dimZ size_t dimT unsigned int offsetX unsigned int offsetY unsigned int offsetZ unsigned int offsetT void* data int owner fff_array* fff_array_new(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ, size_t dimT) fff_array* fff_array_new1d(fff_datatype datatype, size_t dimX) fff_array* fff_array_new2d(fff_datatype datatype, size_t dimX, size_t dimY) fff_array* fff_array_new3d(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ) void fff_array_delete(fff_array* thisone) double fff_array_get(fff_array* thisone, size_t x, size_t y, size_t z, size_t t) fff_array fff_array_get_block(fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ, size_t t0, size_t t1, size_t fT) fff_array fff_array_get_block1d(fff_array* thisone, size_t x0, size_t x1, size_t fX) fff_array fff_array_get_block2d(fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY) fff_array fff_array_get_block3d(fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ) void fff_array_set(fff_array* thisone, size_t x, size_t y, size_t z, size_t t, double value) void fff_array_set1d(fff_array* thisone, size_t x, double value) void fff_array_set2d(fff_array* thisone, size_t x, size_t y, double value) void fff_array_set3d(fff_array* thisone, size_t x, size_t y, size_t z, double value) void fff_array_set_all(fff_array* thisone, double c) void fff_array_extrema(double* min, double* max, fff_array* thisone) void fff_array_copy(fff_array* ares, fff_array* asrc) void fff_array_add(fff_array * x, fff_array * y) void fff_array_sub(fff_array * x, fff_array * y) void fff_array_div(fff_array * x, fff_array * y) void fff_array_mul(fff_array * x, fff_array * y) void fff_array_clamp(fff_array* ares, fff_array* asrc, double th, int* clamp) # Exports from the Python fff wrapper cdef extern from "fffpy.h": ctypedef struct fffpy_multi_iterator: int narr int axis fff_vector** vector size_t index size_t size void fffpy_import_array() fff_vector* fff_vector_fromPyArray(ndarray x) ndarray fff_vector_toPyArray(fff_vector* y) ndarray fff_vector_const_toPyArray(fff_vector* y) fff_matrix* fff_matrix_fromPyArray(ndarray x) ndarray fff_matrix_toPyArray(fff_matrix* y) ndarray fff_matrix_const_toPyArray(fff_matrix* y) fff_array* fff_array_fromPyArray(ndarray x) ndarray fff_array_toPyArray(fff_array* y) fff_datatype fff_datatype_fromNumPy(int npy_type) int fff_datatype_toNumPy(fff_datatype fff_type) void fff_vector_fetch_using_NumPy(fff_vector* y, char* data, npy_intp stride, int type, int itemsize) fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...) 
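    # Hedged usage sketch of the multi-iterator life cycle, at the C level
    # (do_something is a hypothetical callback; real callers pass numpy
    # arrays as the variadic arguments and must check for a NULL return):
    #
    #   fffpy_multi_iterator* it = fffpy_multi_iterator_new(2, 0, X, Y);
    #   while (it->index < it->size) {
    #     do_something(it->vector[0], it->vector[1]);
    #     fffpy_multi_iterator_update(it);
    #   }
    #   fffpy_multi_iterator_delete(it);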
void fffpy_multi_iterator_delete(fffpy_multi_iterator* thisone) void fffpy_multi_iterator_update(fffpy_multi_iterator* thisone) void fffpy_multi_iterator_reset(fffpy_multi_iterator* thisone) nipy-0.6.1/lib/fff_python_wrapper/fffpy.c000066400000000000000000000405721470056100100204250ustar00rootroot00000000000000#include "fffpy.h" #include <stdlib.h> #include <stdarg.h> #define COPY_BUFFERS_USING_NUMPY 1 /* This function must be called before the module can work because PyArray_API is defined static, in order not to share that symbol within the dso. (import_array() asks the python process for the pointer value) */ void* fffpy_import_array(void) { import_array(); return NULL; } /* Static functions */ static npy_intp _PyArray_main_axis(const PyArrayObject* x, int* ok); static fff_vector* _fff_vector_new_from_buffer(const char* data, npy_intp dim, npy_intp stride, int type, int itemsize); static fff_vector* _fff_vector_new_from_PyArrayIter(const PyArrayIterObject* it, npy_intp axis); static void _fff_vector_sync_with_PyArrayIter(fff_vector* y, const PyArrayIterObject* it, npy_intp axis); /* Routines for copying 1d arrays into contiguous double arrays */ #if COPY_BUFFERS_USING_NUMPY # define COPY_BUFFER(y, data, stride, type, itemsize) \ fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize); #else # define COPY_BUFFER(y, data, stride, type, itemsize) \ fff_vector_fetch(y, (void*)data, fff_datatype_fromNumPy(type), stride/itemsize) #endif /* Copy a buffer using numpy. Copy buffer x into y assuming that y is contiguous. */ void fff_vector_fetch_using_NumPy(fff_vector* y, const char* x, npy_intp stride, int type, int itemsize) { npy_intp dim[1] = {(npy_intp)y->size}; npy_intp strides[1] = {stride}; PyArrayObject* X = (PyArrayObject*) PyArray_New(&PyArray_Type, 1, dim, type, strides, (void*)x, itemsize, NPY_BEHAVED, NULL); PyArrayObject* Y = (PyArrayObject*) PyArray_SimpleNewFromData(1, dim, NPY_DOUBLE, (void*)y->data); PyArray_CopyInto(Y, X); Py_XDECREF(Y); Py_XDECREF(X); return; } /* Create a fff_vector from an already allocated buffer. This function acts as a fff_vector constructor that is compatible with fff_vector_delete. */ static fff_vector* _fff_vector_new_from_buffer(const char* data, npy_intp dim, npy_intp stride, int type, int itemsize) { fff_vector* y; size_t sizeof_double = sizeof(double); /* If the input array is double and is aligned, just wrap without copying */ if ((type == NPY_DOUBLE) && (itemsize==sizeof_double)) { y = (fff_vector*)malloc(sizeof(fff_vector)); y->size = (size_t)dim; y->stride = (size_t)stride/sizeof_double; y->data = (double*)data; y->owner = 0; } /* Otherwise, output an owner contiguous vector with copied data */ else { y = fff_vector_new((size_t)dim); COPY_BUFFER(y, data, stride, type, itemsize); } return y; } /* Find the axis with largest dimension */ npy_intp _PyArray_main_axis(const PyArrayObject* x, int* ok) { npy_intp axis, count, i, dim, ndim = PyArray_NDIM(x); *ok = 1; axis = 0; count = 0; for(i=0; i<ndim; i++) { dim = PyArray_DIM(x, i); if (dim > 1) { count ++; axis = i; } } if (count > 1) *ok = 0; return axis; } fff_vector* fff_vector_fromPyArray(const PyArrayObject* x) { fff_vector* y; int ok; npy_intp axis = _PyArray_main_axis(x, &ok); if (!ok) { FFF_ERROR("Input array is not a vector", EINVAL); return NULL; } y = _fff_vector_new_from_buffer(PyArray_DATA(x), PyArray_DIM(x, axis), PyArray_STRIDE(x, axis), PyArray_TYPE(x), PyArray_ITEMSIZE(x)); return y; } /* Export a fff_vector to a PyArray, and delete it. This function is a fff_vector destructor compatible with either fff_vector_new or _fff_vector_new_from_buffer.
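A hedged round-trip sketch (caller code; assumes fffpy_import_array() has already run, and error checks are omitted):

     fff_vector* v = fff_vector_fromPyArray(x);
     fff_vector_scale(v, 2.0);
     PyArrayObject* y = fff_vector_toPyArray(v);

Note that when x is double-typed, v borrows x's buffer, so the scaling is also visible in x; and after the call v must not be used again, since either its buffer now belongs to y or the struct has been freed.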
*/ PyArrayObject* fff_vector_toPyArray(fff_vector* y) { PyArrayObject* x; size_t size; npy_intp dims[1]; if (y == NULL) return NULL; size = y->size; dims[0] = (npy_intp) size; /* If the fff_vector is owner (hence contiguous), just pass the buffer to Python and transfer ownership */ if (y->owner) { x = (PyArrayObject*) PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, (void*)y->data); x->flags = (x->flags) | NPY_OWNDATA; } /* Otherwise, create Python array from scratch */ else x = fff_vector_const_toPyArray(y); /* Ciao bella */ free(y); return x; } /* Export without deleting */ PyArrayObject* fff_vector_const_toPyArray(const fff_vector* y) { PyArrayObject* x; size_t i, size = y->size, stride = y->stride; double* data = (double*) malloc(size*sizeof(double)); double* bufX = data; double* bufY = y->data; npy_intp dims[1]; dims[0] = (npy_intp) size; for (i=0; i<size; i++, bufX++, bufY+=stride) *bufX = *bufY; x = (PyArrayObject*) PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, (void*)data); x->flags = (x->flags) | NPY_OWNDATA; return x; } /* Get a fff_matrix from an input PyArray. This function acts as a fff_matrix constructor that is compatible with fff_matrix_delete. */ fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x) { fff_matrix* y; npy_intp dim[2]; PyArrayObject* xd; /* Check that the input object is a two-dimensional array */ if (PyArray_NDIM(x) != 2) { FFF_ERROR("Input array is not a matrix", EINVAL); return NULL; } /* If the PyArray is double, contiguous and aligned just wrap without copying */ if ((PyArray_TYPE(x) == NPY_DOUBLE) && (PyArray_ISCONTIGUOUS(x)) && (PyArray_ISALIGNED(x))) { y = (fff_matrix*) malloc(sizeof(fff_matrix)); y->size1 = (size_t) PyArray_DIM(x,0); y->size2 = (size_t) PyArray_DIM(x,1); y->tda = y->size2; y->data = PyArray_DATA(x); y->owner = 0; } /* Otherwise, output an owner (contiguous) matrix with copied data */ else { size_t dim0 = PyArray_DIM(x,0), dim1 = PyArray_DIM(x,1); y = fff_matrix_new((size_t)dim0, (size_t)dim1); dim[0] = dim0; dim[1] = dim1; xd = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_DOUBLE, (void*)y->data); PyArray_CopyInto(xd, (PyArrayObject*)x); Py_XDECREF(xd); } return y; } /* Export a fff_matrix to a PyArray, and delete it. This function is a fff_matrix destructor compatible with any of the following constructors: fff_matrix_new and fff_matrix_fromPyArray. */ PyArrayObject* fff_matrix_toPyArray(fff_matrix* y) { PyArrayObject* x; size_t size1; size_t size2; size_t tda; npy_intp dims[2]; if (y == NULL) return NULL; size1 = y->size1; size2 = y->size2; tda = y->tda; dims[0] = (npy_intp) size1; dims[1] = (npy_intp) size2; /* If the fff_matrix is contiguous and owner, just pass the buffer to Python and transfer ownership */ if ((tda == size2) && (y->owner)) { x = (PyArrayObject*) PyArray_SimpleNewFromData(2, dims, NPY_DOUBLE, (void*)y->data); x->flags = (x->flags) | NPY_OWNDATA; } /* Otherwise, create PyArray from scratch. Note that the input fff_matrix is necessarily in row-major order.
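(The fff_matrix layout has no column stride: tda only pads the end of each row, so even a borrowed, non-owner matrix is row-major. Column-major inputs were already copied on the way in by fff_matrix_fromPyArray, per the remarks in fffpy.h.)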
*/ else x = fff_matrix_const_toPyArray(y); /* Ciao bella */ free(y); return x; } /* Export without deleting */ PyArrayObject* fff_matrix_const_toPyArray(const fff_matrix* y) { PyArrayObject* x; size_t size1 = y->size1, size2 = y->size2, tda = y->tda; size_t i, j, pos; double* data = (double*) malloc(size1*size2*sizeof(double)); double* bufX = data; double* bufY = y->data; npy_intp dims[2]; dims[0] = (npy_intp) size1; dims[1] = (npy_intp) size2; for (i=0; i<size1; i++) { pos = i*tda; for (j=0; j<size2; j++, bufX++, pos++) *bufX = bufY[pos]; } x = (PyArrayObject*) PyArray_SimpleNewFromData(2, dims, NPY_DOUBLE, (void*)data); x->flags = (x->flags) | NPY_OWNDATA; return x; } /** Static routines **/ /**** Data type conversions *****/ fff_datatype fff_datatype_fromNumPy(int npy_type) { fff_datatype fff_type; switch (npy_type) { case NPY_UBYTE: fff_type = FFF_UCHAR; break; case NPY_BYTE: fff_type = FFF_SCHAR; break; case NPY_USHORT: fff_type = FFF_USHORT; break; case NPY_SHORT: fff_type = FFF_SSHORT; break; case NPY_UINT: fff_type = FFF_UINT; break; case NPY_INT: fff_type = FFF_INT; break; case NPY_ULONG: fff_type = FFF_ULONG; break; case NPY_LONG: fff_type = FFF_LONG; break; case NPY_FLOAT: fff_type = FFF_FLOAT; break; case NPY_DOUBLE: fff_type = FFF_DOUBLE; break; default: fff_type = FFF_UNKNOWN_TYPE; break; } /* Return the datatype */ return fff_type; } int fff_datatype_toNumPy(fff_datatype fff_type) { int npy_type; switch(fff_type) { case FFF_UCHAR: npy_type = NPY_UBYTE; break; case FFF_SCHAR: npy_type = NPY_BYTE; break; case FFF_USHORT: npy_type = NPY_USHORT; break; case FFF_SSHORT: npy_type = NPY_SHORT; break; case FFF_UINT: npy_type = NPY_UINT; break; case FFF_INT: npy_type = NPY_INT; break; case FFF_ULONG: npy_type = NPY_ULONG; break; case FFF_LONG: npy_type = NPY_LONG; break; case FFF_FLOAT: npy_type = NPY_FLOAT; break; case FFF_DOUBLE: npy_type = NPY_DOUBLE; break; default: npy_type = NPY_NOTYPE; break; } return npy_type; } /**** fff_array interface ****/ fff_array* fff_array_fromPyArray(const PyArrayObject* x) { fff_array* y; fff_datatype datatype; unsigned int nbytes; size_t dimX = 1, dimY = 1, dimZ = 1, dimT = 1; size_t offX = 0, offY = 0, offZ = 0, offT = 0; size_t ndims = (size_t)PyArray_NDIM(x); /* Check that the input array has no more than four dimensions */ if (ndims > 4) { FFF_ERROR("Input array has more than four dimensions", EINVAL); return NULL; } /* Check that the input array is aligned */ if (!
PyArray_ISALIGNED(x)) { FFF_ERROR("Input array is not aligned", EINVAL); return NULL; } /* Match the data type */ datatype = fff_datatype_fromNumPy(PyArray_TYPE(x)); if (datatype == FFF_UNKNOWN_TYPE) { FFF_ERROR("Unrecognized data type", EINVAL); return NULL; } /* Dimensions and offsets */ nbytes = fff_nbytes(datatype); dimX = PyArray_DIM(x, 0); offX = PyArray_STRIDE(x, 0)/nbytes; if (ndims > 1) { dimY = PyArray_DIM(x, 1); offY = PyArray_STRIDE(x, 1)/nbytes; if (ndims > 2) { dimZ = PyArray_DIM(x, 2); offZ = PyArray_STRIDE(x, 2)/nbytes; if (ndims > 3) { dimT = PyArray_DIM(x, 3); offT = PyArray_STRIDE(x, 3)/nbytes; } } } /* Create array (not owner) */ y = (fff_array*)malloc(sizeof(fff_array)); *y = fff_array_view(datatype, PyArray_DATA(x), dimX, dimY, dimZ, dimT, offX, offY, offZ, offT); return y; } PyArrayObject* fff_array_toPyArray(fff_array* y) { PyArrayObject* x; npy_intp dims[4]; int datatype; fff_array* yy; if (y == NULL) return NULL; dims[0] = y->dimX; dims[1] = y->dimY; dims[2] = y->dimZ; dims[3] = y->dimT; /* Match data type */ datatype = fff_datatype_toNumPy(y->datatype); if (datatype == NPY_NOTYPE) { FFF_ERROR("Unrecognized data type", EINVAL); return NULL; } /* Make sure the fff array owns its data, which may require a copy */ if (y->owner) yy = y; else { yy = fff_array_new(y->datatype, y->dimX, y->dimY, y->dimZ, y->dimT); fff_array_copy(yy, y); } /* Create a Python array from the array data (which is contiguous since it is owner). We can use PyArray_SimpleNewFromData given that yy is C-contiguous by fff_array_new. */ x = (PyArrayObject*) PyArray_SimpleNewFromData(yy->ndims, dims, datatype, (void*)yy->data); /* Transfer ownership to Python */ x->flags = (x->flags) | NPY_OWNDATA; /* Dealloc memory if needed */ if (! y->owner) free(yy); /* Delete array */ free(y); return x; } /******************************************************************** Multi-iterator object. ********************************************************************/ static int _PyArray_BroadcastAllButAxis (PyArrayMultiIterObject* mit, int axis); /* Create a fff multi iterator object. Involves creating a PyArrayMultiIterObject instance that lets us iterate simultaneously over an arbitrary number of numpy arrays, except along one common axis. There does not seem to exist a built-in PyArrayMultiIterObject constructor for this usage. If it pops up one day, part of the following code should be replaced. Similarly to the default PyArrayMultiIterObject constructor, we need to set up broadcasting rules. For now, we simply impose that all arrays have exactly the same number of dimensions and that all dimensions be equal except along the "non-iterated" axis. FIXME: The following code does not perform any checking, and will surely crash if the arrays do not fulfill the conditions. */ fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...) { fffpy_multi_iterator* thisone; va_list va; fff_vector** vector; PyArrayMultiIterObject *multi; PyObject *current, *arr; int i, err=0; /* Create new instance */ thisone = (fffpy_multi_iterator*)malloc(sizeof(fffpy_multi_iterator)); /* Static size of PyArrayMultiIterObject.
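(Presumably, sizing the allocation from the type object rather than from a compile-time sizeof keeps it correct when the running numpy's struct layout differs from the build-time headers; see the issue linked below.)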
* * https://github.com/numpy/numpy/issues/26765#issuecomment-2391737671 */ multi = PyArray_malloc(PyArrayMultiIter_Type.tp_basicsize); vector = (fff_vector**)malloc(narr*sizeof(fff_vector*)); /* Initialize the PyArrayMultiIterObject instance from the variadic arguments */ PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); for (i=0; i<narr; i++) multi->iters[i] = NULL; multi->numiter = narr; multi->index = 0; va_start(va, axis); for (i=0; i<narr; i++) { current = va_arg(va, PyObject*); arr = PyArray_FROM_O(current); if (arr == NULL) err = 1; else { multi->iters[i] = (PyArrayIterObject *)PyArray_IterAllButAxis(arr, &axis); Py_DECREF(arr); } } va_end(va); /* Test */ if (!err && _PyArray_BroadcastAllButAxis(multi, axis) < 0) err=1; if (err) { FFF_ERROR("Cannot create broadcast object", ENOMEM); free(thisone); free(vector); Py_DECREF(multi); return NULL; } /* Initialize the multi iterator */ PyArray_MultiIter_RESET(multi); /* Create the fff vectors (views or copies) */ for(i=0; i<narr; i++) vector[i] = _fff_vector_new_from_PyArrayIter(multi->iters[i], axis); /* Instantiate fields */ thisone->narr = narr; thisone->axis = axis; thisone->vector = vector; thisone->multi = multi; thisone->index = thisone->multi->index; thisone->size = thisone->multi->size; return thisone; } void fffpy_multi_iterator_delete(fffpy_multi_iterator* thisone) { unsigned int i; Py_DECREF(thisone->multi); for(i=0; i<thisone->narr; i++) fff_vector_delete(thisone->vector[i]); free(thisone->vector); free(thisone); return; } void fffpy_multi_iterator_update(fffpy_multi_iterator* thisone) { unsigned int i; PyArray_MultiIter_NEXT(thisone->multi); for(i=0; i<thisone->narr; i++) _fff_vector_sync_with_PyArrayIter(thisone->vector[i], (const PyArrayIterObject*)thisone->multi->iters[i], thisone->axis); thisone->index = thisone->multi->index; return; } void fffpy_multi_iterator_reset(fffpy_multi_iterator* thisone) { unsigned int i; PyArray_MultiIter_RESET(thisone->multi); for(i=0; i<thisone->narr; i++) _fff_vector_sync_with_PyArrayIter(thisone->vector[i], (const PyArrayIterObject*)thisone->multi->iters[i], thisone->axis); thisone->index = thisone->multi->index; return; } static int _PyArray_BroadcastAllButAxis (PyArrayMultiIterObject* mit, int axis) { int i, nd; npy_intp size, tmp; PyArrayIterObject *it; /* Not very robust */ it = mit->iters[0]; /* Set the dimensions */ nd = it->ao->nd; mit->nd = nd; for(i=0, size=1; i<nd; i++) { tmp = it->ao->dimensions[i]; mit->dimensions[i] = tmp; if (i!=axis) size *= tmp; } mit->size = size; /* Not very robust either */ return 0; } /* Create an fff_vector from a PyArrayIter object */ fff_vector* _fff_vector_new_from_PyArrayIter(const PyArrayIterObject* it, npy_intp axis) { fff_vector* y; char* data = PyArray_ITER_DATA(it); PyArrayObject* ao = (PyArrayObject*) it->ao; npy_intp dim = PyArray_DIM(ao, axis); npy_intp stride = PyArray_STRIDE(ao, axis); int type = PyArray_TYPE(ao); int itemsize = PyArray_ITEMSIZE(ao); y = _fff_vector_new_from_buffer(data, dim, stride, type, itemsize); return y; } /* Fetch vector data from an iterator (view or copy) */ void _fff_vector_sync_with_PyArrayIter(fff_vector* y, const PyArrayIterObject* it, npy_intp axis) { if (y->owner) { PyArrayObject* ao = (PyArrayObject*) it->ao; COPY_BUFFER(y, PyArray_ITER_DATA(it), PyArray_STRIDE(ao, axis), PyArray_TYPE(ao), PyArray_ITEMSIZE(ao)); } else y->data = (double*) PyArray_ITER_DATA(it); return; } nipy-0.6.1/lib/fff_python_wrapper/fffpy.h000066400000000000000000000120311470056100100204210ustar00rootroot00000000000000#include <Python.h> #include <numpy/arrayobject.h> #include <fff_vector.h> #include <fff_matrix.h> #include <fff_array.h> /*!
\file fffpy.h \brief Python interface to \a fff \author Alexis Roche, Benjamin Thyreau, Bertrand Thirion \date 2006-2009 */ #ifndef NPY_VERSION #define npy_intp intp #define NPY_OWNDATA OWNDATA #define NPY_CONTIGUOUS CONTIGUOUS #define NPY_BEHAVED BEHAVED_FLAGS #endif #define fffpyZeroLONG() (PyArrayObject*)PyArray_SimpleNew(1,(npy_intp*)"\0\0\0\0", PyArray_LONG); /*! \brief Import numpy C API Any Python module written in C, and using the fffpy interface, must call this function to work, because \c PyArray_API is defined static, in order not to share that symbol within the dso. (import_array() asks the python process for the pointer value) */ extern void* fffpy_import_array(void); /*! \brief Convert \c PyArrayObject to \c fff_vector \param x input numpy array This function may be seen as a \c fff_vector constructor compatible with \c fff_vector_delete. If the input has type \c PyArray_DOUBLE, whether or not it is contiguous, the new \c fff_vector is not self-owned and borrows a reference to the PyArrayObject's data. Otherwise, data are copied and the \c fff_vector is self-owned (hence contiguous) just like when created from scratch. Note that the function returns \c NULL if the input array has more than one dimension. */ extern fff_vector* fff_vector_fromPyArray(const PyArrayObject* x); /*! \brief Convert \c fff_vector to \c PyArrayObject \param y input vector Conversely to \c fff_vector_fromPyArray, this function acts as a \c fff_vector destructor compatible with \c fff_vector_new, returning a new PyArrayObject reference. If the input vector is contiguous and self-owned, array ownership is simply transferred to Python; otherwise, the data array is copied. */ extern PyArrayObject* fff_vector_toPyArray(fff_vector* y); /*! \brief Convert \c fff_vector to \c PyArrayObject, without destruction \param y input const vector Unlike \c fff_vector_toPyArray, this function does not delete the input fff_vector. It always forces a copy of the data array. This function is useful when exporting to Python a fff_vector that belongs to a local structure having its own destruction method. */ extern PyArrayObject* fff_vector_const_toPyArray(const fff_vector* y); /*! \brief Convert \c PyArrayObject to \c fff_matrix \param x input numpy array This function may be seen as a \c fff_matrix constructor compatible with \c fff_matrix_delete. If the input has type \c PyArray_DOUBLE and is contiguous, the new \c fff_matrix is not self-owned and borrows a reference to the PyArrayObject's data. Otherwise, data are copied and the \c fff_matrix is self-owned (hence contiguous) just like when created from scratch. \c NULL is returned if the input array does not have exactly two dimensions. Remarks: 1) non-contiguity provokes a copy because the \c fff_matrix structure does not support strides; 2) matrices in column-major order (Fortran convention) always get copied using this function. */ extern fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x); /*! \brief Convert \c fff_matrix to \c PyArrayObject \param y input matrix Conversely to \c fff_matrix_fromPyArray, this function acts as a \c fff_matrix destructor compatible with \c fff_matrix_new, returning a new PyArrayObject reference. If the input matrix is contiguous and self-owned, array ownership is simply transferred to Python; otherwise, the data array is copied. */ extern PyArrayObject* fff_matrix_toPyArray(fff_matrix* y); /*!
\brief Convert \c fff_matrix to \c PyArrayObject, without destruction \param y input const matrix Unlike \c fff_matrix_toPyArray, this function does not delete the input fff_matrix. It always forces a copy of the data array. This function is useful when exporting to Python a fff_matrix that belongs to a local structure having its own destruction method. */ extern PyArrayObject* fff_matrix_const_toPyArray(const fff_matrix* y); /*! \brief Maps a numpy array to an fff_array \param x input array This function instantiates an fff_array that borrows data from the numpy array. Delete using \c fff_array_delete. */ extern fff_array* fff_array_fromPyArray(const PyArrayObject* x); extern PyArrayObject* fff_array_toPyArray(fff_array* y); extern fff_datatype fff_datatype_fromNumPy(int npy_type); extern int fff_datatype_toNumPy(fff_datatype fff_type); extern void fff_vector_fetch_using_NumPy(fff_vector* y, const char* data, npy_intp stride, int type, int itemsize); /* Multi-iterator object. */ typedef struct { int narr; int axis; fff_vector** vector; size_t index; size_t size; PyArrayMultiIterObject *multi; } fffpy_multi_iterator; extern fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...); extern void fffpy_multi_iterator_delete(fffpy_multi_iterator* thisone); extern void fffpy_multi_iterator_update(fffpy_multi_iterator* thisone); extern void fffpy_multi_iterator_reset(fffpy_multi_iterator* thisone); nipy-0.6.1/lib/fff_python_wrapper/meson.build000066400000000000000000000000321470056100100212740ustar00rootroot00000000000000fff += files(['fffpy.c']) nipy-0.6.1/lib/lapack_lite/000077500000000000000000000000001470056100100155055ustar00rootroot00000000000000nipy-0.6.1/lib/lapack_lite/blas_lite.c000066400000000000000000004061701470056100100176170ustar00rootroot00000000000000/* NOTE: This is generated code. Look in Misc/lapack_lite for information on remaking this file. */ #include "f2c.h" #ifdef HAVE_CONFIG #include "config.h" #else extern doublereal dlamch_(char *); #define EPSILON dlamch_("Epsilon") #define SAFEMINIMUM dlamch_("Safe minimum") #define PRECISION dlamch_("Precision") #define BASE dlamch_("Base") #endif extern doublereal dlapy2_(doublereal *x, doublereal *y); /* Table of constant values */ static doublereal c_b90 = 1.; static integer c__1 = 1; doublereal dasum_(integer *n, doublereal *dx, integer *incx) { /* System generated locals */ integer i__1, i__2; doublereal ret_val, d__1, d__2, d__3, d__4, d__5, d__6; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer nincx, mp1; /* Purpose ======= takes the sum of the absolute values. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ ret_val = 0.; dtemp = 0.; if (*n <= 0 || *incx <= 0) { return ret_val; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ nincx = *n * *incx; i__1 = nincx; i__2 = *incx; for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { dtemp += (d__1 = dx[i__], abs(d__1)); /* L10: */ } ret_val = dtemp; return ret_val; /* code for increment equal to 1 clean-up loop */ L20: m = *n % 6; if (m == 0) { goto L40; } i__2 = m; for (i__ = 1; i__ <= i__2; ++i__) { dtemp += (d__1 = dx[i__], abs(d__1)); /* L30: */ } if (*n < 6) { goto L60; } L40: mp1 = m + 1; i__2 = *n; for (i__ = mp1; i__ <= i__2; i__ += 6) { dtemp = dtemp + (d__1 = dx[i__], abs(d__1)) + (d__2 = dx[i__ + 1], abs(d__2)) + (d__3 = dx[i__ + 2], abs(d__3)) + (d__4 = dx[i__ + 3], abs(d__4)) + (d__5 = dx[i__ + 4], abs(d__5)) + (d__6 = dx[i__ + 5], abs(d__6)); /* L50: */ } L60: ret_val = dtemp; return ret_val; } /* dasum_ */ /* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m, ix, iy, mp1; /* Purpose ======= constant times a vector plus a vector. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*da == 0.) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dy[iy] += *da * dx[ix]; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 4; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dy[i__] += *da * dx[i__]; /* L30: */ } if (*n < 4) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 4) { dy[i__] += *da * dx[i__]; dy[i__ + 1] += *da * dx[i__ + 1]; dy[i__ + 2] += *da * dx[i__ + 2]; dy[i__ + 3] += *da * dx[i__ + 3]; /* L50: */ } return 0; } /* daxpy_ */ doublereal dcabs1_(doublecomplex *z__) { /* System generated locals */ doublereal ret_val, d__1, d__2; /* Builtin functions */ double d_imag(doublecomplex *); /* Purpose ======= DCABS1 computes absolute value of a double complex number */ ret_val = (d__1 = z__->r, abs(d__1)) + (d__2 = d_imag(z__), abs(d__2)); return ret_val; } /* dcabs1_ */ /* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m, ix, iy, mp1; /* Purpose ======= copies a vector, x, to a vector, y. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dy[iy] = dx[ix]; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 7; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dy[i__] = dx[i__]; /* L30: */ } if (*n < 7) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 7) { dy[i__] = dx[i__]; dy[i__ + 1] = dx[i__ + 1]; dy[i__ + 2] = dx[i__ + 2]; dy[i__ + 3] = dx[i__ + 3]; dy[i__ + 4] = dx[i__ + 4]; dy[i__ + 5] = dx[i__ + 5]; dy[i__ + 6] = dx[i__ + 6]; /* L50: */ } return 0; } /* dcopy_ */ doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; doublereal ret_val; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer ix, iy, mp1; /* Purpose ======= forms the dot product of two vectors. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ ret_val = 0.; dtemp = 0.; if (*n <= 0) { return ret_val; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp += dx[ix] * dy[iy]; ix += *incx; iy += *incy; /* L10: */ } ret_val = dtemp; return ret_val; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 5; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dtemp += dx[i__] * dy[i__]; /* L30: */ } if (*n < 5) { goto L60; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 5) { dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[ i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ + 4] * dy[i__ + 4]; /* L50: */ } L60: ret_val = dtemp; return ret_val; } /* ddot_ */ /* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer * n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static logical nota, notb; static doublereal temp; static integer i__, j, l, ncola; extern logical lsame_(char *, char *); static integer nrowa, nrowb; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGEMM performs one of the matrix-matrix operations C := alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X', alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Arguments ========== TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n', op( A ) = A. TRANSA = 'T' or 't', op( A ) = A'. TRANSA = 'C' or 'c', op( A ) = A'. 
Unchanged on exit. TRANSB - CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: TRANSB = 'N' or 'n', op( B ) = B. TRANSB = 'T' or 't', op( B ) = B'. TRANSB = 'C' or 'c', op( B ) = B'. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix op( A ) and of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix op( B ) and the number of columns of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry, K specifies the number of columns of the matrix op( A ) and the number of rows of the matrix op( B ). K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = 'N' or 'n', and is m otherwise. Before entry with TRANSA = 'N' or 'n', the leading m by k part of the array A must contain the matrix A, otherwise the leading k by m part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = 'N' or 'n' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = 'N' or 'n', and is k otherwise. Before entry with TRANSB = 'N' or 'n', the leading k by n part of the array B must contain the matrix B, otherwise the leading n by k part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANSB = 'N' or 'n' then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n matrix ( alpha*op( A )*op( B ) + beta*C ). LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Set NOTA and NOTB as true if A and B respectively are not transposed and set NROWA, NCOLA and NROWB as the number of rows and columns of A and the number of rows of B respectively. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ nota = lsame_(transa, "N"); notb = lsame_(transb, "N"); if (nota) { nrowa = *m; ncola = *k; } else { nrowa = *k; ncola = *m; } if (notb) { nrowb = *k; } else { nrowb = *n; } /* Test the input parameters. */ info = 0; if (! nota && ! lsame_(transa, "C") && ! lsame_( transa, "T")) { info = 1; } else if (! notb && ! lsame_(transb, "C") && ! 
lsame_(transb, "T")) { info = 2; } else if (*m < 0) { info = 3; } else if (*n < 0) { info = 4; } else if (*k < 0) { info = 5; } else if (*lda < max(1,nrowa)) { info = 8; } else if (*ldb < max(1,nrowb)) { info = 10; } else if (*ldc < max(1,*m)) { info = 13; } if (info != 0) { xerbla_("DGEMM ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And if alpha.eq.zero. */ if (*alpha == 0.) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } return 0; } /* Start the operations. */ if (notb) { if (nota) { /* Form C := alpha*A*B + beta*C. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } } else if (*beta != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L60: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (b[l + j * b_dim1] != 0.) { temp = *alpha * b[l + j * b_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L70: */ } } /* L80: */ } /* L90: */ } } else { /* Form C := alpha*A'*B + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * b[l + j * b_dim1]; /* L100: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L110: */ } /* L120: */ } } } else { if (nota) { /* Form C := alpha*A*B' + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L130: */ } } else if (*beta != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L140: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (b[j + l * b_dim1] != 0.) { temp = *alpha * b[j + l * b_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L150: */ } } /* L160: */ } /* L170: */ } } else { /* Form C := alpha*A'*B' + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * b[j + l * b_dim1]; /* L180: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L190: */ } /* L200: */ } } } return 0; /* End of DGEMM . 
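A calling sketch from C (a hedged example, not part of the generated source; note the Fortran convention: every argument by pointer, matrices column-major):

       integer m = 2, n = 2, k = 2;
       doublereal one = 1., zero = 0.;
       doublereal a[4] = {1., 2., 3., 4.};   column-major A = [1 3; 2 4]
       doublereal b[4] = {5., 6., 7., 8.};   column-major B = [5 7; 6 8]
       doublereal c[4];
       dgemm_("N", "N", &m, &n, &k, &one, a, &m, b, &k, &zero, c, &m);

which should leave C = A*B = [23 31; 34 46] in c as {23., 34., 31., 46.}.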
*/ } /* dgemm_ */ /* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal * alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal *beta, doublereal *y, integer *incy) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer lenx, leny, i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ========== TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' y := alpha*A*x + beta*y. TRANS = 'T' or 't' y := alpha*A'*x + beta*y. TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix A. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry, the leading m by n part of the array A must contain the matrix of coefficients. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, m ). Unchanged on exit. X - DOUBLE PRECISION array of DIMENSION at least ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' and at least ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. Before entry, the incremented array X must contain the vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - DOUBLE PRECISION array of DIMENSION at least ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' and at least ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. Before entry with BETA non-zero, the incremented array Y must contain the vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; --y; /* Function Body */ info = 0; if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C") ) { info = 1; } else if (*m < 0) { info = 2; } else if (*n < 0) { info = 3; } else if (*lda < max(1,*m)) { info = 6; } else if (*incx == 0) { info = 8; } else if (*incy == 0) { info = 11; } if (info != 0) { xerbla_("DGEMV ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* Set LENX and LENY, the lengths of the vectors x and y, and set up the start points in X and Y. 
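(For a negative increment the start point is the far end of the buffer: KX = 1 - ( LENX - 1 )*INCX below, so that stepping by INCX walks back toward the first element; the same holds for KY.)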
*/ if (lsame_(trans, "N")) { lenx = *n; leny = *m; } else { lenx = *m; leny = *n; } if (*incx > 0) { kx = 1; } else { kx = 1 - (lenx - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (leny - 1) * *incy; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. First form y := beta*y. */ if (*beta != 1.) { if (*incy == 1) { if (*beta == 0.) { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = 0.; /* L10: */ } } else { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = *beta * y[i__]; /* L20: */ } } } else { iy = ky; if (*beta == 0.) { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = 0.; iy += *incy; /* L30: */ } } else { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = *beta * y[iy]; iy += *incy; /* L40: */ } } } } if (*alpha == 0.) { return 0; } if (lsame_(trans, "N")) { /* Form y := alpha*A*x + y. */ jx = kx; if (*incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { y[i__] += temp * a[i__ + j * a_dim1]; /* L50: */ } } jx += *incx; /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; iy = ky; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { y[iy] += temp * a[i__ + j * a_dim1]; iy += *incy; /* L70: */ } } jx += *incx; /* L80: */ } } } else { /* Form y := alpha*A'*x + y. */ jy = ky; if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = 0.; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } y[jy] += *alpha * temp; jy += *incy; /* L100: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = 0.; ix = kx; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[ix]; ix += *incx; /* L110: */ } y[jy] += *alpha * temp; jy += *incy; /* L120: */ } } } return 0; /* End of DGEMV . */ } /* dgemv_ */ /* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *y, integer *incy, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, ix, jy, kx; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGER performs the rank 1 operation A := alpha*x*y' + A, where alpha is a scalar, x is an m element vector, y is an n element vector and A is an m by n matrix. Arguments ========== M - INTEGER. On entry, M specifies the number of rows of the matrix A. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( m - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the m element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. Unchanged on exit. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). 
Before entry, the leading m by n part of the array A must contain the matrix of coefficients. On exit, A is overwritten by the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, m ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; --y; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (*m < 0) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*incy == 0) { info = 7; } else if (*lda < max(1,*m)) { info = 9; } if (info != 0) { xerbla_("DGER ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0.) { return 0; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (*incy > 0) { jy = 1; } else { jy = 1 - (*n - 1) * *incy; } if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (y[jy] != 0.) { temp = *alpha * y[jy]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L10: */ } } jy += *incy; /* L20: */ } } else { if (*incx > 0) { kx = 1; } else { kx = 1 - (*m - 1) * *incx; } i__1 = *n; for (j = 1; j <= i__1; ++j) { if (y[jy] != 0.) { temp = *alpha * y[jy]; ix = kx; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L30: */ } } jy += *incy; /* L40: */ } } return 0; /* End of DGER . */ } /* dger_ */ doublereal dnrm2_(integer *n, doublereal *x, integer *incx) { /* System generated locals */ integer i__1, i__2; doublereal ret_val, d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal norm, scale, absxi; static integer ix; static doublereal ssq; /* Purpose ======= DNRM2 returns the euclidean norm of a vector via the function name, so that DNRM2 := sqrt( x'*x ) -- This version written on 25-October-1982. Modified on 14-October-1993 to inline the call to DLASSQ. Sven Hammarling, Nag Ltd. */ /* Parameter adjustments */ --x; /* Function Body */ if (*n < 1 || *incx < 1) { norm = 0.; } else if (*n == 1) { norm = abs(x[1]); } else { scale = 0.; ssq = 1.; /* The following loop is equivalent to this call to the LAPACK auxiliary routine: CALL DLASSQ( N, X, INCX, SCALE, SSQ ) */ i__1 = (*n - 1) * *incx + 1; i__2 = *incx; for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { if (x[ix] != 0.) { absxi = (d__1 = x[ix], abs(d__1)); if (scale < absxi) { /* Computing 2nd power */ d__1 = scale / absxi; ssq = ssq * (d__1 * d__1) + 1.; scale = absxi; } else { /* Computing 2nd power */ d__1 = absxi / scale; ssq += d__1 * d__1; } } /* L10: */ } norm = scale * sqrt(ssq); } ret_val = norm; return ret_val; /* End of DNRM2. */ } /* dnrm2_ */ /* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy, doublereal *c__, doublereal *s) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__; static doublereal dtemp; static integer ix, iy; /* Purpose ======= applies a plane rotation. jack dongarra, linpack, 3/11/78. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = *c__ * dx[ix] + *s * dy[iy]; dy[iy] = *c__ * dy[iy] - *s * dx[ix]; dx[ix] = dtemp; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 */ L20: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = *c__ * dx[i__] + *s * dy[i__]; dy[i__] = *c__ * dy[i__] - *s * dx[i__]; dx[i__] = dtemp; /* L30: */ } return 0; } /* drot_ */ /* Subroutine */ int drotg_(doublereal *da, doublereal *db, doublereal *c__, doublereal *s) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal r__, scale, z__, roe; /* Purpose ======= construct givens plane rotation. jack dongarra, linpack, 3/11/78. */ roe = *db; if (abs(*da) > abs(*db)) { roe = *da; } scale = abs(*da) + abs(*db); if (scale != 0.) { goto L10; } *c__ = 1.; *s = 0.; r__ = 0.; z__ = 0.; goto L20; L10: /* Computing 2nd power */ d__1 = *da / scale; /* Computing 2nd power */ d__2 = *db / scale; r__ = scale * sqrt(d__1 * d__1 + d__2 * d__2); r__ = d_sign(&c_b90, &roe) * r__; *c__ = *da / r__; *s = *db / r__; z__ = 1.; if (abs(*da) > abs(*db)) { z__ = *s; } if (abs(*db) >= abs(*da) && *c__ != 0.) { z__ = 1. / *c__; } L20: *da = r__; *db = z__; return 0; } /* drotg_ */ /* Subroutine */ int drotm_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy, doublereal *dparam) { /* Initialized data */ static doublereal zero = 0.; static doublereal two = 2.; /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer i__; static doublereal dflag, w, z__; static integer kx, ky, nsteps; static doublereal dh11, dh12, dh21, dh22; /* Purpose ======= APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN (DY**T) DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY. WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) H=( ) ( ) ( ) ( ) (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM. Arguments ========= N (input) INTEGER number of elements in input vector(s) DX (input/output) DOUBLE PRECISION array, dimension N double precision vector with 5 elements INCX (input) INTEGER storage spacing between elements of DX DY (input/output) DOUBLE PRECISION array, dimension N double precision vector with N elements INCY (input) INTEGER storage spacing between elements of DY DPARAM (input/output) DOUBLE PRECISION array, dimension 5 DPARAM(1)=DFLAG DPARAM(2)=DH11 DPARAM(3)=DH21 DPARAM(4)=DH12 DPARAM(5)=DH22 ===================================================================== */ /* Parameter adjustments */ --dparam; --dy; --dx; /* Function Body */ dflag = dparam[1]; if (*n <= 0 || dflag + two == zero) { goto L140; } if (! (*incx == *incy && *incx > 0)) { goto L70; } nsteps = *n * *incx; if (dflag < 0.) 
{ goto L50; } else if (dflag == 0) { goto L10; } else { goto L30; } L10: dh12 = dparam[4]; dh21 = dparam[3]; i__1 = nsteps; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w + z__ * dh12; dy[i__] = w * dh21 + z__; /* L20: */ } goto L140; L30: dh11 = dparam[2]; dh22 = dparam[5]; i__2 = nsteps; i__1 = *incx; for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w * dh11 + z__; dy[i__] = -w + dh22 * z__; /* L40: */ } goto L140; L50: dh11 = dparam[2]; dh12 = dparam[4]; dh21 = dparam[3]; dh22 = dparam[5]; i__1 = nsteps; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w * dh11 + z__ * dh12; dy[i__] = w * dh21 + z__ * dh22; /* L60: */ } goto L140; L70: kx = 1; ky = 1; if (*incx < 0) { kx = (1 - *n) * *incx + 1; } if (*incy < 0) { ky = (1 - *n) * *incy + 1; } if (dflag < 0.) { goto L120; } else if (dflag == 0) { goto L80; } else { goto L100; } L80: dh12 = dparam[4]; dh21 = dparam[3]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w + z__ * dh12; dy[ky] = w * dh21 + z__; kx += *incx; ky += *incy; /* L90: */ } goto L140; L100: dh11 = dparam[2]; dh22 = dparam[5]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w * dh11 + z__; dy[ky] = -w + dh22 * z__; kx += *incx; ky += *incy; /* L110: */ } goto L140; L120: dh11 = dparam[2]; dh12 = dparam[4]; dh21 = dparam[3]; dh22 = dparam[5]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w * dh11 + z__ * dh12; dy[ky] = w * dh21 + z__ * dh22; kx += *incx; ky += *incy; /* L130: */ } L140: return 0; } /* drotm_ */ /* Subroutine */ int drotmg_(doublereal *dd1, doublereal *dd2, doublereal * dx1, doublereal *dy1, doublereal *dparam) { /* Initialized data */ static doublereal zero = 0.; static doublereal one = 1.; static doublereal two = 2.; static doublereal gam = 4096.; static doublereal gamsq = 16777216.; static doublereal rgamsq = 5.9604645e-8; /* Format strings */ static char fmt_120[] = ""; static char fmt_150[] = ""; static char fmt_180[] = ""; static char fmt_210[] = ""; /* System generated locals */ doublereal d__1; /* Local variables */ static doublereal dflag, dtemp, du, dp1, dp2, dq1, dq2, dh11, dh12, dh21, dh22; static integer igo; /* Assigned format variables */ static char *igo_fmt; /* Purpose ======= CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)* DY2)**T. WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) H=( ) ( ) ( ) ( ) (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). LOCATIONS 2-4 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22 RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.) THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. 
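A CALLING SKETCH (HEDGED EXAMPLE, NOT PART OF THE GENERATED SOURCE):

       doublereal dd1 = 1., dd2 = 1., dx1 = 3., dy1 = 4.;
       doublereal dparam[5];
       drotmg_(&dd1, &dd2, &dx1, &dy1, dparam);

ON RETURN DPARAM(1) HOLDS DFLAG AND, DEPENDING ON ITS VALUE, THE STORED ENTRIES OF H; THE TRANSFORMATION IS THEN APPLIED TO A PAIR OF VECTORS WITH DROTM.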
Arguments ========= DD1 (input/output) DOUBLE PRECISION DD2 (input/output) DOUBLE PRECISION DX1 (input/output) DOUBLE PRECISION DY1 (input) DOUBLE PRECISION DPARAM (input/output) DOUBLE PRECISION array, dimension 5 DPARAM(1)=DFLAG DPARAM(2)=DH11 DPARAM(3)=DH21 DPARAM(4)=DH12 DPARAM(5)=DH22 ===================================================================== */ /* Parameter adjustments */ --dparam; /* Function Body */ if (! (*dd1 < zero)) { goto L10; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L10: /* CASE-DD1-NONNEGATIVE */ dp2 = *dd2 * *dy1; if (! (dp2 == zero)) { goto L20; } dflag = -two; goto L260; /* REGULAR-CASE.. */ L20: dp1 = *dd1 * *dx1; dq2 = dp2 * *dy1; dq1 = dp1 * *dx1; if (! (abs(dq1) > abs(dq2))) { goto L40; } dh21 = -(*dy1) / *dx1; dh12 = dp2 / dp1; du = one - dh12 * dh21; if (! (du <= zero)) { goto L30; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L30: dflag = zero; *dd1 /= du; *dd2 /= du; *dx1 *= du; /* GO SCALE-CHECK.. */ goto L100; L40: if (! (dq2 < zero)) { goto L50; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L50: dflag = one; dh11 = dp1 / dp2; dh22 = *dx1 / *dy1; du = one + dh11 * dh22; dtemp = *dd2 / du; *dd2 = *dd1 / du; *dd1 = dtemp; *dx1 = *dy1 * du; /* GO SCALE-CHECK */ goto L100; /* PROCEDURE..ZERO-H-D-AND-DX1.. */ L60: dflag = -one; dh11 = zero; dh12 = zero; dh21 = zero; dh22 = zero; *dd1 = zero; *dd2 = zero; *dx1 = zero; /* RETURN.. */ goto L220; /* PROCEDURE..FIX-H.. */ L70: if (! (dflag >= zero)) { goto L90; } if (! (dflag == zero)) { goto L80; } dh11 = one; dh22 = one; dflag = -one; goto L90; L80: dh21 = -one; dh12 = one; dflag = -one; L90: switch (igo) { case 0: goto L120; case 1: goto L150; case 2: goto L180; case 3: goto L210; } /* PROCEDURE..SCALE-CHECK */ L100: L110: if (! (*dd1 <= rgamsq)) { goto L130; } if (*dd1 == zero) { goto L160; } igo = 0; igo_fmt = fmt_120; /* FIX-H.. */ goto L70; L120: /* Computing 2nd power */ d__1 = gam; *dd1 *= d__1 * d__1; *dx1 /= gam; dh11 /= gam; dh12 /= gam; goto L110; L130: L140: if (! (*dd1 >= gamsq)) { goto L160; } igo = 1; igo_fmt = fmt_150; /* FIX-H.. */ goto L70; L150: /* Computing 2nd power */ d__1 = gam; *dd1 /= d__1 * d__1; *dx1 *= gam; dh11 *= gam; dh12 *= gam; goto L140; L160: L170: if (! (abs(*dd2) <= rgamsq)) { goto L190; } if (*dd2 == zero) { goto L220; } igo = 2; igo_fmt = fmt_180; /* FIX-H.. */ goto L70; L180: /* Computing 2nd power */ d__1 = gam; *dd2 *= d__1 * d__1; dh21 /= gam; dh22 /= gam; goto L170; L190: L200: if (! (abs(*dd2) >= gamsq)) { goto L220; } igo = 3; igo_fmt = fmt_210; /* FIX-H.. */ goto L70; L210: /* Computing 2nd power */ d__1 = gam; *dd2 /= d__1 * d__1; dh21 *= gam; dh22 *= gam; goto L200; L220: if (dflag < 0.) { goto L250; } else if (dflag == 0) { goto L230; } else { goto L240; } L230: dparam[3] = dh21; dparam[4] = dh12; goto L260; L240: dparam[2] = dh11; dparam[5] = dh22; goto L260; L250: dparam[2] = dh11; dparam[3] = dh21; dparam[4] = dh12; dparam[5] = dh22; L260: dparam[1] = dflag; return 0; } /* drotmg_ */ /* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx, integer *incx) { /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer i__, m, nincx, mp1; /* Purpose ======= * scales a vector by a constant. uses unrolled loops for increment equal to one. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ if (*n <= 0 || *incx <= 0) { return 0; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ nincx = *n * *incx; i__1 = nincx; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { dx[i__] = *da * dx[i__]; /* L10: */ } return 0; /* code for increment equal to 1 clean-up loop */ L20: m = *n % 5; if (m == 0) { goto L40; } i__2 = m; for (i__ = 1; i__ <= i__2; ++i__) { dx[i__] = *da * dx[i__]; /* L30: */ } if (*n < 5) { return 0; } L40: mp1 = m + 1; i__2 = *n; for (i__ = mp1; i__ <= i__2; i__ += 5) { dx[i__] = *da * dx[i__]; dx[i__ + 1] = *da * dx[i__ + 1]; dx[i__ + 2] = *da * dx[i__ + 2]; dx[i__ + 3] = *da * dx[i__ + 3]; dx[i__ + 4] = *da * dx[i__ + 4]; /* L50: */ } return 0; } /* dscal_ */ /* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer ix, iy, mp1; /* Purpose ======= interchanges two vectors. uses unrolled loops for increments equal one. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = dx[ix]; dx[ix] = dy[iy]; dy[iy] = dtemp; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 3; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = dx[i__]; dx[i__] = dy[i__]; dy[i__] = dtemp; /* L30: */ } if (*n < 3) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 3) { dtemp = dx[i__]; dx[i__] = dy[i__]; dy[i__] = dtemp; dtemp = dx[i__ + 1]; dx[i__ + 1] = dy[i__ + 1]; dy[i__ + 1] = dtemp; dtemp = dx[i__ + 2]; dx[i__ + 2] = dy[i__ + 2]; dy[i__ + 2] = dtemp; /* L50: */ } return 0; } /* dswap_ */ /* Subroutine */ int dsymm_(char *side, char *uplo, integer *m, integer *n, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j, k; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYMM performs one of the matrix-matrix operations C := alpha*A*B + beta*C, or C := alpha*B*A + beta*C, where alpha and beta are scalars, A is a symmetric matrix and B and C are m by n matrices. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether the symmetric matrix A appears on the left or right in the operation as follows: SIDE = 'L' or 'l' C := alpha*A*B + beta*C, SIDE = 'R' or 'r' C := alpha*B*A + beta*C, Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of the symmetric matrix is to be referenced. 
UPLO = 'L' or 'l' Only the lower triangular part of the symmetric matrix is to be referenced. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix C. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is m when SIDE = 'L' or 'l' and is n otherwise. Before entry with SIDE = 'L' or 'l', the m by m part of the array A must contain the symmetric matrix, such that when UPLO = 'U' or 'u', the leading m by m upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced, and when UPLO = 'L' or 'l', the leading m by m lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Before entry with SIDE = 'R' or 'r', the n by n part of the array A must contain the symmetric matrix, such that when UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced, and when UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Set NROWA as the number of rows of A. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(side, "L")) { nrowa = *m; } else { nrowa = *n; } upper = lsame_(uplo, "U"); /* Test the input parameters. */ info = 0; if (! lsame_(side, "L") && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! 
lsame_(uplo, "L")) { info = 2; } else if (*m < 0) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldb < max(1,*m)) { info = 9; } else if (*ldc < max(1,*m)) { info = 12; } if (info != 0) { xerbla_("DSYMM ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } return 0; } /* Start the operations. */ if (lsame_(side, "L")) { /* Form C := alpha*A*B + beta*C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp1 = *alpha * b[i__ + j * b_dim1]; temp2 = 0.; i__3 = i__ - 1; for (k = 1; k <= i__3; ++k) { c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1]; temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1]; /* L50: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } /* L60: */ } /* L70: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp1 = *alpha * b[i__ + j * b_dim1]; temp2 = 0.; i__2 = *m; for (k = i__ + 1; k <= i__2; ++k) { c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1]; temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1]; /* L80: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } /* L90: */ } /* L100: */ } } } else { /* Form C := alpha*B*A + beta*C. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * a[j + j * a_dim1]; if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = temp1 * b[i__ + j * b_dim1]; /* L110: */ } } else { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * b[i__ + j * b_dim1]; /* L120: */ } } i__2 = j - 1; for (k = 1; k <= i__2; ++k) { if (upper) { temp1 = *alpha * a[k + j * a_dim1]; } else { temp1 = *alpha * a[j + k * a_dim1]; } i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1]; /* L130: */ } /* L140: */ } i__2 = *n; for (k = j + 1; k <= i__2; ++k) { if (upper) { temp1 = *alpha * a[j + k * a_dim1]; } else { temp1 = *alpha * a[k + j * a_dim1]; } i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1]; /* L150: */ } /* L160: */ } /* L170: */ } } return 0; /* End of DSYMM . 
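Usage sketch (editorial addition, not part of the reference source; all names are illustrative). Form C := A*B with the symmetric A supplied on the left and stored, column-major, in its upper triangle:

    integer m = 2, n = 2, lda = 2, ldb = 2, ldc = 2;
    doublereal alpha = 1., beta = 0.;
    doublereal a[4] = {2., 0., 1., 3.};   (upper triangle of [[2,1],[1,3]])
    doublereal b[4] = {1., 2., 3., 4.};   (B = [[1,3],[2,4]], column-major)
    doublereal c[4];
    dsymm_("L", "U", &m, &n, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);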
*/ } /* dsymm_ */ /* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal *beta, doublereal *y, integer *incy) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYMV performs the matrix-vector operation y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; --y; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*lda < max(1,*n)) { info = 5; } else if (*incx == 0) { info = 7; } else if (*incy == 0) { info = 10; } if (info != 0) { xerbla_("DSYMV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* Set up the start points in X and Y. */ if (*incx > 0) { kx = 1; } else { kx = 1 - (*n - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (*n - 1) * *incy; } /* Start the operations. 
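(Editorial note: because only one triangle of the symmetric A is stored, each stored column below does double duty -- it drives a saxpy-style update of y and, standing in for the matching row of A, a dot product with x.)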
In this version the elements of A are accessed sequentially with one pass through the triangular part of A. First form y := beta*y. */ if (*beta != 1.) { if (*incy == 1) { if (*beta == 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = 0.; /* L10: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = *beta * y[i__]; /* L20: */ } } } else { iy = ky; if (*beta == 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = 0.; iy += *incy; /* L30: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = *beta * y[iy]; iy += *incy; /* L40: */ } } } } if (*alpha == 0.) { return 0; } if (lsame_(uplo, "U")) { /* Form y when A is stored in upper triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[j]; temp2 = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { y[i__] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[i__]; /* L50: */ } y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2; /* L60: */ } } else { jx = kx; jy = ky; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[jx]; temp2 = 0.; ix = kx; iy = ky; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { y[iy] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[ix]; ix += *incx; iy += *incy; /* L70: */ } y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2; jx += *incx; jy += *incy; /* L80: */ } } } else { /* Form y when A is stored in lower triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[j]; temp2 = 0.; y[j] += temp1 * a[j + j * a_dim1]; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { y[i__] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } y[j] += *alpha * temp2; /* L100: */ } } else { jx = kx; jy = ky; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[jx]; temp2 = 0.; y[jy] += temp1 * a[j + j * a_dim1]; ix = jx; iy = jy; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; iy += *incy; y[iy] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[ix]; /* L110: */ } y[jy] += *alpha * temp2; jx += *incx; jy += *incy; /* L120: */ } } } return 0; /* End of DSYMV . */ } /* dsymv_ */ /* Subroutine */ int dsyr_(char *uplo, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR performs the symmetric rank 1 operation A := alpha*x*x' + A, where alpha is a real scalar, x is an n element vector and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. 
On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. On exit, the upper triangular part of the array A is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. On exit, the lower triangular part of the array A is overwritten by the lower triangular part of the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*lda < max(1,*n)) { info = 7; } if (info != 0) { xerbla_("DSYR ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0.) { return 0; } /* Set the start point in X if the increment is not unity. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through the triangular part of A. */ if (lsame_(uplo, "U")) { /* Form A when A is stored in upper triangle. */ if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = *alpha * x[j]; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L10: */ } } /* L20: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; ix = kx; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L30: */ } } jx += *incx; /* L40: */ } } } else { /* Form A when A is stored in lower triangle. */ if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = *alpha * x[j]; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L50: */ } } /* L60: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; ix = jx; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L70: */ } } jx += *incx; /* L80: */ } } } return 0; /* End of DSYR . 
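Usage sketch (editorial addition, not part of the reference source; names are illustrative). Apply the rank-1 update A := 2*x*x' + A to the upper triangle of the 2 by 2 identity:

    integer n = 2, inc = 1, lda = 2;
    doublereal alpha = 2.;
    doublereal x[2] = {1., 2.};
    doublereal a[4] = {1., 0., 0., 1.};   (upper triangle of I, column-major)
    dsyr_("U", &n, &alpha, x, &inc, a, &lda);
    (on exit the stored triangle holds a(1,1)=3, a(1,2)=4, a(2,2)=9)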
*/ } /* dsyr_ */ /* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *y, integer *incy, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR2 performs the symmetric rank 2 operation A := alpha*x*y' + alpha*y*x' + A, where alpha is a scalar, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. Unchanged on exit. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. On exit, the upper triangular part of the array A is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. On exit, the lower triangular part of the array A is overwritten by the lower triangular part of the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; --y; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*incy == 0) { info = 7; } else if (*lda < max(1,*n)) { info = 9; } if (info != 0) { xerbla_("DSYR2 ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0.) { return 0; } /* Set up the start points in X and Y if the increments are not both unity. 
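(Editorial note: for a negative increment the vector is stored from the far end of the array, so KX = 1 - (N-1)*INCX makes X(KX) the logical first element and stepping by INCX then visits x(1)..x(n) in order; KY likewise.)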
*/ if (*incx != 1 || *incy != 1) { if (*incx > 0) { kx = 1; } else { kx = 1 - (*n - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (*n - 1) * *incy; } jx = kx; jy = ky; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through the triangular part of A. */ if (lsame_(uplo, "U")) { /* Form A when A is stored in the upper triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0. || y[j] != 0.) { temp1 = *alpha * y[j]; temp2 = *alpha * x[j]; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * temp1 + y[i__] * temp2; /* L10: */ } } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0. || y[jy] != 0.) { temp1 = *alpha * y[jy]; temp2 = *alpha * x[jx]; ix = kx; iy = ky; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * temp1 + y[iy] * temp2; ix += *incx; iy += *incy; /* L30: */ } } jx += *incx; jy += *incy; /* L40: */ } } } else { /* Form A when A is stored in the lower triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0. || y[j] != 0.) { temp1 = *alpha * y[j]; temp2 = *alpha * x[j]; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * temp1 + y[i__] * temp2; /* L50: */ } } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0. || y[jy] != 0.) { temp1 = *alpha * y[jy]; temp2 = *alpha * x[jx]; ix = jx; iy = jy; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * temp1 + y[iy] * temp2; ix += *incx; iy += *incy; /* L70: */ } } jx += *incx; jy += *incy; /* L80: */ } } } return 0; /* End of DSYR2 . */ } /* dsyr2_ */ /* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j, l; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR2K performs one of the symmetric rank 2k operations C := alpha*A*B' + alpha*B*A' + beta*C, or C := alpha*A'*B + alpha*B'*A + beta*C, where alpha and beta are scalars, C is an n by n symmetric matrix and A and B are n by k matrices in the first case and k by n matrices in the second case. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array C is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of C is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of C is to be referenced. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + beta*C. TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + beta*C. TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A + beta*C. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. 
On entry with TRANS = 'N' or 'n', K specifies the number of columns of the matrices A and B, and on entry with TRANS = 'T' or 't' or 'C' or 'c', K specifies the number of rows of the matrices A and B. K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array A must contain the matrix A, otherwise the leading k by n part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANS = 'N' or 'n' then LDA must be at least max( 1, n ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array B must contain the matrix B, otherwise the leading k by n part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANS = 'N' or 'n' then LDB must be at least max( 1, n ), otherwise LDB must be at least max( 1, k ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, n ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(trans, "N")) { nrowa = *n; } else { nrowa = *k; } upper = lsame_(uplo, "U"); info = 0; if (! upper && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (*n < 0) { info = 3; } else if (*k < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldb < max(1,nrowa)) { info = 9; } else if (*ldc < max(1,*n)) { info = 12; } if (info != 0) { xerbla_("DSYR2K", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (upper) { if (*beta == 0.) 
{ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } } else { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L70: */ } /* L80: */ } } } return 0; } /* Start the operations. */ if (lsame_(trans, "N")) { /* Form C := alpha*A*B' + alpha*B*A' + C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L90: */ } } else if (*beta != 1.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L100: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) { temp1 = *alpha * b[j + l * b_dim1]; temp2 = *alpha * a[j + l * a_dim1]; i__3 = j; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ i__ + l * a_dim1] * temp1 + b[i__ + l * b_dim1] * temp2; /* L110: */ } } /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L140: */ } } else if (*beta != 1.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L150: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) { temp1 = *alpha * b[j + l * b_dim1]; temp2 = *alpha * a[j + l * a_dim1]; i__3 = *n; for (i__ = j; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ i__ + l * a_dim1] * temp1 + b[i__ + l * b_dim1] * temp2; /* L160: */ } } /* L170: */ } /* L180: */ } } } else { /* Form C := alpha*A'*B + alpha*B'*A + C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { temp1 = 0.; temp2 = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; /* L190: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + *alpha * temp1 + *alpha * temp2; } /* L200: */ } /* L210: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { temp1 = 0.; temp2 = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; /* L220: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + *alpha * temp1 + *alpha * temp2; } /* L230: */ } /* L240: */ } } } return 0; /* End of DSYR2K. 
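Usage sketch (editorial addition, not part of the reference source; names are illustrative). With n = 2, k = 1 and beta = 0, C receives alpha*(A*B' + B*A') in its upper triangle:

    integer n = 2, k = 1, lda = 2, ldb = 2, ldc = 2;
    doublereal alpha = 1., beta = 0.;
    doublereal a[2] = {1., 2.};   (A is the column (1,2)')
    doublereal b[2] = {3., 4.};   (B is the column (3,4)')
    doublereal c[4];
    dsyr2k_("U", "N", &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
    (stored result: c(1,1)=6, c(1,2)=10, c(2,2)=16)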
*/ } /* dsyr2k_ */ /* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, l; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYRK performs one of the symmetric rank k operations C := alpha*A*A' + beta*C, or C := alpha*A'*A + beta*C, where alpha and beta are scalars, C is an n by n symmetric matrix and A is an n by k matrix in the first case and a k by n matrix in the second case. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array C is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of C is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of C is to be referenced. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. TRANS = 'T' or 't' C := alpha*A'*A + beta*C. TRANS = 'C' or 'c' C := alpha*A'*A + beta*C. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry with TRANS = 'N' or 'n', K specifies the number of columns of the matrix A, and on entry with TRANS = 'T' or 't' or 'C' or 'c', K specifies the number of rows of the matrix A. K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array A must contain the matrix A, otherwise the leading k by n part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANS = 'N' or 'n' then LDA must be at least max( 1, n ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, n ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. 
Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(trans, "N")) { nrowa = *n; } else { nrowa = *k; } upper = lsame_(uplo, "U"); info = 0; if (! upper && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (*n < 0) { info = 3; } else if (*k < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldc < max(1,*n)) { info = 10; } if (info != 0) { xerbla_("DSYRK ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (upper) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } } else { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L70: */ } /* L80: */ } } } return 0; } /* Start the operations. */ if (lsame_(trans, "N")) { /* Form C := alpha*A*A' + beta*C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L90: */ } } else if (*beta != 1.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L100: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0.) { temp = *alpha * a[j + l * a_dim1]; i__3 = j; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L110: */ } } /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L140: */ } } else if (*beta != 1.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L150: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0.) { temp = *alpha * a[j + l * a_dim1]; i__3 = *n; for (i__ = j; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L160: */ } } /* L170: */ } /* L180: */ } } } else { /* Form C := alpha*A'*A + beta*C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; /* L190: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L200: */ } /* L210: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; /* L220: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L230: */ } /* L240: */ } } } return 0; /* End of DSYRK . 
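Usage sketch (editorial addition, not part of the reference source; names are illustrative). With n = 2, k = 1 and beta = 0, C receives alpha*A*A' in its upper triangle:

    integer n = 2, k = 1, lda = 2, ldc = 2;
    doublereal alpha = 1., beta = 0.;
    doublereal a[2] = {1., 2.};   (A is the column (1,2)')
    doublereal c[4];
    dsyrk_("U", "N", &n, &k, &alpha, a, &lda, &beta, c, &ldc);
    (stored result: c(1,1)=1, c(1,2)=2, c(2,2)=4)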
*/ } /* dsyrk_ */ /* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag, integer *m, integer *n, doublereal *alpha, doublereal *a, integer * lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, k; static logical lside; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRMM performs one of the matrix-matrix operations B := alpha*op( A )*B, or B := alpha*B*op( A ), where alpha is a scalar, B is an m by n matrix, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A'. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether op( A ) multiplies B from the left or right as follows: SIDE = 'L' or 'l' B := alpha*op( A )*B. SIDE = 'R' or 'r' B := alpha*B*op( A ). Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix A is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n' op( A ) = A. TRANSA = 'T' or 't' op( A ) = A'. TRANSA = 'C' or 'c' op( A ) = A'. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of B. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of B. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. Before entry with UPLO = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' then LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the matrix B, and on exit is overwritten by the transformed matrix. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. 
Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ lside = lsame_(side, "L"); if (lside) { nrowa = *m; } else { nrowa = *n; } nounit = lsame_(diag, "N"); upper = lsame_(uplo, "U"); info = 0; if (! lside && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! lsame_(uplo, "L")) { info = 2; } else if (! lsame_(transa, "N") && ! lsame_(transa, "T") && ! lsame_(transa, "C")) { info = 3; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 4; } else if (*m < 0) { info = 5; } else if (*n < 0) { info = 6; } else if (*lda < max(1,nrowa)) { info = 9; } else if (*ldb < max(1,*m)) { info = 11; } if (info != 0) { xerbla_("DTRMM ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = 0.; /* L10: */ } /* L20: */ } return 0; } /* Start the operations. */ if (lside) { if (lsame_(transa, "N")) { /* Form B := alpha*A*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (k = 1; k <= i__2; ++k) { if (b[k + j * b_dim1] != 0.) { temp = *alpha * b[k + j * b_dim1]; i__3 = k - 1; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * a[i__ + k * a_dim1]; /* L30: */ } if (nounit) { temp *= a[k + k * a_dim1]; } b[k + j * b_dim1] = temp; } /* L40: */ } /* L50: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (k = *m; k >= 1; --k) { if (b[k + j * b_dim1] != 0.) { temp = *alpha * b[k + j * b_dim1]; b[k + j * b_dim1] = temp; if (nounit) { b[k + j * b_dim1] *= a[k + k * a_dim1]; } i__2 = *m; for (i__ = k + 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * a[i__ + k * a_dim1]; /* L60: */ } } /* L70: */ } /* L80: */ } } } else { /* Form B := alpha*A'*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp = b[i__ + j * b_dim1]; if (nounit) { temp *= a[i__ + i__ * a_dim1]; } i__2 = i__ - 1; for (k = 1; k <= i__2; ++k) { temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L90: */ } b[i__ + j * b_dim1] = *alpha * temp; /* L100: */ } /* L110: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = b[i__ + j * b_dim1]; if (nounit) { temp *= a[i__ + i__ * a_dim1]; } i__3 = *m; for (k = i__ + 1; k <= i__3; ++k) { temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L120: */ } b[i__ + j * b_dim1] = *alpha * temp; /* L130: */ } /* L140: */ } } } } else { if (lsame_(transa, "N")) { /* Form B := alpha*B*A. */ if (upper) { for (j = *n; j >= 1; --j) { temp = *alpha; if (nounit) { temp *= a[j + j * a_dim1]; } i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L150: */ } i__1 = j - 1; for (k = 1; k <= i__1; ++k) { if (a[k + j * a_dim1] != 0.) { temp = *alpha * a[k + j * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L160: */ } } /* L170: */ } /* L180: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = *alpha; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L190: */ } i__2 = *n; for (k = j + 1; k <= i__2; ++k) { if (a[k + j * a_dim1] != 0.) 
{ temp = *alpha * a[k + j * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L200: */ } } /* L210: */ } /* L220: */ } } } else { /* Form B := alpha*B*A'. */ if (upper) { i__1 = *n; for (k = 1; k <= i__1; ++k) { i__2 = k - 1; for (j = 1; j <= i__2; ++j) { if (a[j + k * a_dim1] != 0.) { temp = *alpha * a[j + k * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L230: */ } } /* L240: */ } temp = *alpha; if (nounit) { temp *= a[k + k * a_dim1]; } if (temp != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L250: */ } } /* L260: */ } } else { for (k = *n; k >= 1; --k) { i__1 = *n; for (j = k + 1; j <= i__1; ++j) { if (a[j + k * a_dim1] != 0.) { temp = *alpha * a[j + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L270: */ } } /* L280: */ } temp = *alpha; if (nounit) { temp *= a[k + k * a_dim1]; } if (temp != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L290: */ } } /* L300: */ } } } } return 0; /* End of DTRMM . */ } /* dtrmm_ */ /* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRMV performs one of the matrix-vector operations x := A*x, or x := A'*x, where x is an n element vector and A is an n by n unit, or non-unit, upper or lower triangular matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' x := A*x. TRANS = 'T' or 't' x := A'*x. TRANS = 'C' or 'c' x := A'*x. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). 
Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the transformed vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,*n)) { info = 6; } else if (*incx == 0) { info = 8; } if (info != 0) { xerbla_("DTRMV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } nounit = lsame_(diag, "N"); /* Set up the start point in X if the increment is not unity. This will be ( N - 1 )*INCX too small for descending loops. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (lsame_(trans, "N")) { /* Form x := A*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = x[j]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { x[i__] += temp * a[i__ + j * a_dim1]; /* L10: */ } if (nounit) { x[j] *= a[j + j * a_dim1]; } } /* L20: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = x[jx]; ix = kx; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { x[ix] += temp * a[i__ + j * a_dim1]; ix += *incx; /* L30: */ } if (nounit) { x[jx] *= a[j + j * a_dim1]; } } jx += *incx; /* L40: */ } } } else { if (*incx == 1) { for (j = *n; j >= 1; --j) { if (x[j] != 0.) { temp = x[j]; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { x[i__] += temp * a[i__ + j * a_dim1]; /* L50: */ } if (nounit) { x[j] *= a[j + j * a_dim1]; } } /* L60: */ } } else { kx += (*n - 1) * *incx; jx = kx; for (j = *n; j >= 1; --j) { if (x[jx] != 0.) { temp = x[jx]; ix = kx; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { x[ix] += temp * a[i__ + j * a_dim1]; ix -= *incx; /* L70: */ } if (nounit) { x[jx] *= a[j + j * a_dim1]; } } jx -= *incx; /* L80: */ } } } } else { /* Form x := A'*x.
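(Editorial note: in the upper-triangular branch j runs from n down to 1, so the new x(j) -- the dot product of column j of A with the old x(1..j) -- is written only after every later entry has been finished, while the not-yet-visited x(1..j-1) still hold their original values; the lower-triangular branch ascends for the same reason.)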
*/ if (lsame_(uplo, "U")) { if (*incx == 1) { for (j = *n; j >= 1; --j) { temp = x[j]; if (nounit) { temp *= a[j + j * a_dim1]; } for (i__ = j - 1; i__ >= 1; --i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } x[j] = temp; /* L100: */ } } else { jx = kx + (*n - 1) * *incx; for (j = *n; j >= 1; --j) { temp = x[jx]; ix = jx; if (nounit) { temp *= a[j + j * a_dim1]; } for (i__ = j - 1; i__ >= 1; --i__) { ix -= *incx; temp += a[i__ + j * a_dim1] * x[ix]; /* L110: */ } x[jx] = temp; jx -= *incx; /* L120: */ } } } else { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[j]; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L130: */ } x[j] = temp; /* L140: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[jx]; ix = jx; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; temp += a[i__ + j * a_dim1] * x[ix]; /* L150: */ } x[jx] = temp; jx += *incx; /* L160: */ } } } } return 0; /* End of DTRMV . */ } /* dtrmv_ */ /* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag, integer *m, integer *n, doublereal *alpha, doublereal *a, integer * lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, k; static logical lside; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRSM solves one of the matrix equations op( A )*X = alpha*B, or X*op( A ) = alpha*B, where alpha is a scalar, X and B are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A'. The matrix X is overwritten on B. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether op( A ) appears on the left or right of X as follows: SIDE = 'L' or 'l' op( A )*X = alpha*B. SIDE = 'R' or 'r' X*op( A ) = alpha*B. Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix A is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n' op( A ) = A. TRANSA = 'T' or 't' op( A ) = A'. TRANSA = 'C' or 'c' op( A ) = A'. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of B. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of B. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. 
Before entry with UPLO = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' then LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ lside = lsame_(side, "L"); if (lside) { nrowa = *m; } else { nrowa = *n; } nounit = lsame_(diag, "N"); upper = lsame_(uplo, "U"); info = 0; if (! lside && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! lsame_(uplo, "L")) { info = 2; } else if (! lsame_(transa, "N") && ! lsame_(transa, "T") && ! lsame_(transa, "C")) { info = 3; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 4; } else if (*m < 0) { info = 5; } else if (*n < 0) { info = 6; } else if (*lda < max(1,nrowa)) { info = 9; } else if (*ldb < max(1,*m)) { info = 11; } if (info != 0) { xerbla_("DTRSM ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = 0.; /* L10: */ } /* L20: */ } return 0; } /* Start the operations. */ if (lside) { if (lsame_(transa, "N")) { /* Form B := alpha*inv( A )*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L30: */ } } for (k = *m; k >= 1; --k) { if (b[k + j * b_dim1] != 0.) { if (nounit) { b[k + j * b_dim1] /= a[k + k * a_dim1]; } i__2 = k - 1; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ i__ + k * a_dim1]; /* L40: */ } } /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L70: */ } } i__2 = *m; for (k = 1; k <= i__2; ++k) { if (b[k + j * b_dim1] != 0.) { if (nounit) { b[k + j * b_dim1] /= a[k + k * a_dim1]; } i__3 = *m; for (i__ = k + 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ i__ + k * a_dim1]; /* L80: */ } } /* L90: */ } /* L100: */ } } } else { /* Form B := alpha*inv( A' )*B. 
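(When A is upper triangular, A' is lower triangular, so each column of B is solved by forward substitution: element i of the column is computed from the already finished elements 1 through i-1, which is exactly the loop structure below.)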
*/ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = *alpha * b[i__ + j * b_dim1]; i__3 = i__ - 1; for (k = 1; k <= i__3; ++k) { temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L110: */ } if (nounit) { temp /= a[i__ + i__ * a_dim1]; } b[i__ + j * b_dim1] = temp; /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp = *alpha * b[i__ + j * b_dim1]; i__2 = *m; for (k = i__ + 1; k <= i__2; ++k) { temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L140: */ } if (nounit) { temp /= a[i__ + i__ * a_dim1]; } b[i__ + j * b_dim1] = temp; /* L150: */ } /* L160: */ } } } } else { if (lsame_(transa, "N")) { /* Form B := alpha*B*inv( A ). */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L170: */ } } i__2 = j - 1; for (k = 1; k <= i__2; ++k) { if (a[k + j * a_dim1] != 0.) { i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ i__ + k * b_dim1]; /* L180: */ } } /* L190: */ } if (nounit) { temp = 1. / a[j + j * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L200: */ } } /* L210: */ } } else { for (j = *n; j >= 1; --j) { if (*alpha != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L220: */ } } i__1 = *n; for (k = j + 1; k <= i__1; ++k) { if (a[k + j * a_dim1] != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ i__ + k * b_dim1]; /* L230: */ } } /* L240: */ } if (nounit) { temp = 1. / a[j + j * a_dim1]; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L250: */ } } /* L260: */ } } } else { /* Form B := alpha*B*inv( A' ). */ if (upper) { for (k = *n; k >= 1; --k) { if (nounit) { temp = 1. / a[k + k * a_dim1]; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L270: */ } } i__1 = k - 1; for (j = 1; j <= i__1; ++j) { if (a[j + k * a_dim1] != 0.) { temp = a[j + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= temp * b[i__ + k * b_dim1]; /* L280: */ } } /* L290: */ } if (*alpha != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] ; /* L300: */ } } /* L310: */ } } else { i__1 = *n; for (k = 1; k <= i__1; ++k) { if (nounit) { temp = 1. / a[k + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L320: */ } } i__2 = *n; for (j = k + 1; j <= i__2; ++j) { if (a[j + k * a_dim1] != 0.) { temp = a[j + k * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= temp * b[i__ + k * b_dim1]; /* L330: */ } } /* L340: */ } if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] ; /* L350: */ } } /* L360: */ } } } } return 0; /* End of DTRSM . 
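A minimal usage sketch, assuming a hypothetical caller (the arrays a and b and the dimensions shown are illustrative, not part of this file): to overwrite an n by nrhs matrix B with inv(A)*B for an upper triangular, non-unit A stored column-major, one would call

    integer n = 3, nrhs = 2;
    doublereal alpha = 1.;
    dtrsm_("L", "U", "N", "N", &n, &nrhs, &alpha, a, &n, b, &n);

with a pointing at the n by n triangular factor and b at the right-hand sides.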
*/ } /* dtrsm_ */ /* Subroutine */ int dtrsv_(char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRSV solves one of the systems of equations A*x = b, or A'*x = b, where b and x are n element vectors and A is an n by n unit, or non-unit, upper or lower triangular matrix. No test for singularity or near-singularity is included in this routine. Such tests must be performed before calling this routine. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the equations to be solved as follows: TRANS = 'N' or 'n' A*x = b. TRANS = 'T' or 't' A'*x = b. TRANS = 'C' or 'c' A'*x = b. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element right-hand side vector b. On exit, X is overwritten with the solution vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,*n)) { info = 6; } else if (*incx == 0) { info = 8; } if (info != 0) { xerbla_("DTRSV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } nounit = lsame_(diag, "N"); /* Set up the start point in X if the increment is not unity. 
This will be ( N - 1 )*INCX too small for descending loops. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (lsame_(trans, "N")) { /* Form x := inv( A )*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { for (j = *n; j >= 1; --j) { if (x[j] != 0.) { if (nounit) { x[j] /= a[j + j * a_dim1]; } temp = x[j]; for (i__ = j - 1; i__ >= 1; --i__) { x[i__] -= temp * a[i__ + j * a_dim1]; /* L10: */ } } /* L20: */ } } else { jx = kx + (*n - 1) * *incx; for (j = *n; j >= 1; --j) { if (x[jx] != 0.) { if (nounit) { x[jx] /= a[j + j * a_dim1]; } temp = x[jx]; ix = jx; for (i__ = j - 1; i__ >= 1; --i__) { ix -= *incx; x[ix] -= temp * a[i__ + j * a_dim1]; /* L30: */ } } jx -= *incx; /* L40: */ } } } else { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { if (nounit) { x[j] /= a[j + j * a_dim1]; } temp = x[j]; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { x[i__] -= temp * a[i__ + j * a_dim1]; /* L50: */ } } /* L60: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { if (nounit) { x[jx] /= a[j + j * a_dim1]; } temp = x[jx]; ix = jx; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; x[ix] -= temp * a[i__ + j * a_dim1]; /* L70: */ } } jx += *incx; /* L80: */ } } } } else { /* Form x := inv( A' )*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[j]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { temp -= a[i__ + j * a_dim1] * x[i__]; /* L90: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[j] = temp; /* L100: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[jx]; ix = kx; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { temp -= a[i__ + j * a_dim1] * x[ix]; ix += *incx; /* L110: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[jx] = temp; jx += *incx; /* L120: */ } } } else { if (*incx == 1) { for (j = *n; j >= 1; --j) { temp = x[j]; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { temp -= a[i__ + j * a_dim1] * x[i__]; /* L130: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[j] = temp; /* L140: */ } } else { kx += (*n - 1) * *incx; jx = kx; for (j = *n; j >= 1; --j) { temp = x[jx]; ix = kx; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { temp -= a[i__ + j * a_dim1] * x[ix]; ix -= *incx; /* L150: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[jx] = temp; jx -= *incx; /* L160: */ } } } } return 0; /* End of DTRSV . */ } /* dtrsv_ */ integer idamax_(integer *n, doublereal *dx, integer *incx) { /* System generated locals */ integer ret_val, i__1; doublereal d__1; /* Local variables */ static doublereal dmax__; static integer i__, ix; /* Purpose ======= finds the index of element having max. absolute value. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ ret_val = 0; if (*n < 1 || *incx <= 0) { return ret_val; } ret_val = 1; if (*n == 1) { return ret_val; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ ix = 1; dmax__ = abs(dx[1]); ix += *incx; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { if ((d__1 = dx[ix], abs(d__1)) <= dmax__) { goto L5; } ret_val = i__; dmax__ = (d__1 = dx[ix], abs(d__1)); L5: ix += *incx; /* L10: */ } return ret_val; /* code for increment equal to 1 */ L20: dmax__ = abs(dx[1]); i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { if ((d__1 = dx[i__], abs(d__1)) <= dmax__) { goto L30; } ret_val = i__; dmax__ = (d__1 = dx[i__], abs(d__1)); L30: ; } return ret_val; } /* idamax_ */ logical lsame_(char *ca, char *cb) { /* System generated locals */ logical ret_val; /* Local variables */ static integer inta, intb, zcode; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= LSAME returns .TRUE. if CA is the same letter as CB regardless of case. Arguments ========= CA (input) CHARACTER*1 CB (input) CHARACTER*1 CA and CB specify the single characters to be compared. ===================================================================== Test if the characters are equal */ ret_val = *(unsigned char *)ca == *(unsigned char *)cb; if (ret_val) { return ret_val; } /* Now test for equivalence if both characters are alphabetic. */ zcode = 'Z'; /* Use 'Z' rather than 'A' so that ASCII can be detected on Prime machines, on which ICHAR returns a value with bit 8 set. ICHAR('A') on Prime machines returns 193 which is the same as ICHAR('A') on an EBCDIC machine. */ inta = *(unsigned char *)ca; intb = *(unsigned char *)cb; if (zcode == 90 || zcode == 122) { /* ASCII is assumed - ZCODE is the ASCII code of either lower or upper case 'Z'. */ if (inta >= 97 && inta <= 122) { inta += -32; } if (intb >= 97 && intb <= 122) { intb += -32; } } else if (zcode == 233 || zcode == 169) { /* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or upper case 'Z'. */ if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta >= 162 && inta <= 169) { inta += 64; } if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb >= 162 && intb <= 169) { intb += 64; } } else if (zcode == 218 || zcode == 250) { /* ASCII is assumed, on Prime machines - ZCODE is the ASCII code plus 128 of either lower or upper case 'Z'. */ if (inta >= 225 && inta <= 250) { inta += -32; } if (intb >= 225 && intb <= 250) { intb += -32; } } ret_val = inta == intb; /* RETURN End of LSAME */ return ret_val; } /* lsame_ */ /* Subroutine */ int xerbla_(char *srname, integer *info) { /* Format strings */ static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu" "mber \002,i2,\002 had \002,\002an illegal value\002)"; /* Builtin functions */ integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); /* Subroutine */ int s_stop(char *, ftnlen); /* Fortran I/O blocks */ static cilist io___197 = { 0, 6, 0, fmt_9999, 0 }; /* -- LAPACK auxiliary routine (preliminary version) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= XERBLA is an error handler for the LAPACK routines. It is called by an LAPACK routine if an input parameter has an invalid value. A message is printed and execution stops. 
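As an illustration (an assumed example call, not part of the original comment): xerbla_("DGEMM ", &info) with *info == 4 writes the line ** On entry to DGEMM parameter number 4 had an illegal value to unit 6 (standard output) via the format FMT_9999 and then stops.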
Installers may consider modifying the STOP statement in order to call system-specific exception-handling facilities. Arguments ========= SRNAME (input) CHARACTER*6 The name of the routine which called XERBLA. INFO (input) INTEGER The position of the invalid parameter in the parameter list of the calling routine. */ s_wsfe(&io___197); do_fio(&c__1, srname, (ftnlen)6); do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer)); e_wsfe(); s_stop("", (ftnlen)0); /* End of XERBLA */ return 0; } /* xerbla_ */ nipy-0.6.1/lib/lapack_lite/dlamch.c000066400000000000000000000562771470056100100171160ustar00rootroot00000000000000#include <stdio.h> #include "f2c.h" /* If config.h is available, we only need dlamc3 */ #ifndef HAVE_CONFIG doublereal dlamch_(char *cmach) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMCH determines double precision machine parameters. Arguments ========= CMACH (input) CHARACTER*1 Specifies the value to be returned by DLAMCH: = 'E' or 'e', DLAMCH := eps = 'S' or 's , DLAMCH := sfmin = 'B' or 'b', DLAMCH := base = 'P' or 'p', DLAMCH := eps*base = 'N' or 'n', DLAMCH := t = 'R' or 'r', DLAMCH := rnd = 'M' or 'm', DLAMCH := emin = 'U' or 'u', DLAMCH := rmin = 'L' or 'l', DLAMCH := emax = 'O' or 'o', DLAMCH := rmax where eps = relative machine precision sfmin = safe minimum, such that 1/sfmin does not overflow base = base of the machine prec = eps*base t = number of (base) digits in the mantissa rnd = 1.0 when rounding occurs in addition, 0.0 otherwise emin = minimum exponent before (gradual) underflow rmin = underflow threshold - base**(emin-1) emax = largest exponent before overflow rmax = overflow threshold - (base**emax)*(1-eps) ===================================================================== */ /* >>Start of File<< Initialized data */ static logical first = TRUE_; /* System generated locals */ integer i__1; doublereal ret_val; /* Builtin functions */ double pow_di(doublereal *, integer *); /* Local variables */ static doublereal base; static integer beta; static doublereal emin, prec, emax; static integer imin, imax; static logical lrnd; static doublereal rmin, rmax, t, rmach; extern logical lsame_(char *, char *); static doublereal small, sfmin; extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *, doublereal *, integer *, doublereal *, integer *, doublereal *); static integer it; static doublereal rnd, eps; if (first) { first = FALSE_; dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax); base = (doublereal) beta; t = (doublereal) it; if (lrnd) { rnd = 1.; i__1 = 1 - it; eps = pow_di(&base, &i__1) / 2; } else { rnd = 0.; i__1 = 1 - it; eps = pow_di(&base, &i__1); } prec = eps * base; emin = (doublereal) imin; emax = (doublereal) imax; sfmin = rmin; small = 1. / rmax; if (small >= sfmin) { /* Use SMALL plus a bit, to avoid the possibility of rounding causing overflow when computing 1/sfmin.
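(Here small = 1/rmax; when it is not below sfmin, sfmin is raised by one unit of relative error, eps, so that computing 1/sfmin cannot overflow.)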
*/ sfmin = small * (eps + 1.); } } if (lsame_(cmach, "E")) { rmach = eps; } else if (lsame_(cmach, "S")) { rmach = sfmin; } else if (lsame_(cmach, "B")) { rmach = base; } else if (lsame_(cmach, "P")) { rmach = prec; } else if (lsame_(cmach, "N")) { rmach = t; } else if (lsame_(cmach, "R")) { rmach = rnd; } else if (lsame_(cmach, "M")) { rmach = emin; } else if (lsame_(cmach, "U")) { rmach = rmin; } else if (lsame_(cmach, "L")) { rmach = emax; } else if (lsame_(cmach, "O")) { rmach = rmax; } ret_val = rmach; return ret_val; /* End of DLAMCH */ } /* dlamch_ */ /* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical *ieee1) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC1 determines the machine parameters given by BETA, T, RND, and IEEE1. Arguments ========= BETA (output) INTEGER The base of the machine. T (output) INTEGER The number of ( BETA ) digits in the mantissa. RND (output) LOGICAL Specifies whether proper rounding ( RND = .TRUE. ) or chopping ( RND = .FALSE. ) occurs in addition. This may not be a reliable guide to the way in which the machine performs its arithmetic. IEEE1 (output) LOGICAL Specifies whether rounding appears to be done in the IEEE 'round to nearest' style. Further Details =============== The routine is based on the routine ENVRON by Malcolm and incorporates suggestions by Gentleman and Marovich. See Malcolm M. A. (1972) Algorithms to reveal properties of floating-point arithmetic. Comms. of the ACM, 15, 949-951. Gentleman W. M. and Marovich S. B. (1974) More on algorithms that reveal properties of floating point arithmetic units. Comms. of the ACM, 17, 276-277. ===================================================================== */ /* Initialized data */ static logical first = TRUE_; /* System generated locals */ doublereal d__1, d__2; /* Local variables */ static logical lrnd; static doublereal a, b, c, f; static integer lbeta; static doublereal savec; extern doublereal dlamc3_(doublereal *, doublereal *); static logical lieee1; static doublereal t1, t2; static integer lt; static doublereal one, qtr; if (first) { first = FALSE_; one = 1.; /* LBETA, LIEEE1, LT and LRND are the local values of BETA, IEEE1, T and RND. Throughout this routine we use the function DLAMC3 to ensure that relevant values are stored and not held in registers, or are not affected by optimizers. Compute a = 2.0**m with the smallest positive integer m such that fl( a + 1.0 ) = a. */ a = 1.; c = 1.; /* + WHILE( C.EQ.ONE )LOOP */ L10: if (c == one) { a *= 2; c = dlamc3_(&a, &one); d__1 = -a; c = dlamc3_(&c, &d__1); goto L10; } /* + END WHILE Now compute b = 2.0**m with the smallest positive integer m such that fl( a + b ) .gt. a. */ b = 1.; c = dlamc3_(&a, &b); /* + WHILE( C.EQ.A )LOOP */ L20: if (c == a) { b *= 2; c = dlamc3_(&a, &b); goto L20; } /* + END WHILE Now compute the base. a and c are neighbouring floating point numbers in the interval ( beta**t, beta**( t + 1 ) ) and so their difference is beta. Adding 0.25 to c is to ensure that it is truncated to beta and not ( beta - 1 ). */ qtr = one / 4; savec = c; d__1 = -a; c = dlamc3_(&c, &d__1); lbeta = (integer) (c + qtr); /* Now determine whether rounding or chopping occurs, by adding a bit less than beta/2 and a bit more than beta/2 to a.
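(Adding b/2 - b/100 to a leaves a unchanged whether the machine rounds or chops, while adding b/2 + b/100 changes a only if the machine rounds; the pairs of dlamc3_ calls below perform exactly these two tests.)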
*/ b = (doublereal) lbeta; d__1 = b / 2; d__2 = -b / 100; f = dlamc3_(&d__1, &d__2); c = dlamc3_(&f, &a); if (c == a) { lrnd = TRUE_; } else { lrnd = FALSE_; } d__1 = b / 2; d__2 = b / 100; f = dlamc3_(&d__1, &d__2); c = dlamc3_(&f, &a); if (lrnd && c == a) { lrnd = FALSE_; } /* Try and decide whether rounding is done in the IEEE 'round to nearest' style. B/2 is half a unit in the last place of the two numbers A and SAVEC. Furthermore, A is even, i.e. has last bit zero, and SAVEC is odd. Thus adding B/2 to A should not change A, but adding B/2 to SAVEC should change SAVEC. */ d__1 = b / 2; t1 = dlamc3_(&d__1, &a); d__1 = b / 2; t2 = dlamc3_(&d__1, &savec); lieee1 = t1 == a && t2 > savec && lrnd; /* Now find the mantissa, t. It should be the integer part of log to the base beta of a, however it is safer to determine t by powering. So we find t as the smallest positive integer for which fl( beta**t + 1.0 ) = 1.0. */ lt = 0; a = 1.; c = 1.; /* + WHILE( C.EQ.ONE )LOOP */ L30: if (c == one) { ++lt; a *= lbeta; c = dlamc3_(&a, &one); d__1 = -a; c = dlamc3_(&c, &d__1); goto L30; } /* + END WHILE */ } *beta = lbeta; *t = lt; *rnd = lrnd; *ieee1 = lieee1; return 0; /* End of DLAMC1 */ } /* dlamc1_ */ /* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd, doublereal *eps, integer *emin, doublereal *rmin, integer *emax, doublereal *rmax) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC2 determines the machine parameters specified in its argument list. Arguments ========= BETA (output) INTEGER The base of the machine. T (output) INTEGER The number of ( BETA ) digits in the mantissa. RND (output) LOGICAL Specifies whether proper rounding ( RND = .TRUE. ) or chopping ( RND = .FALSE. ) occurs in addition. This may not be a reliable guide to the way in which the machine performs its arithmetic. EPS (output) DOUBLE PRECISION The smallest positive number such that fl( 1.0 - EPS ) .LT. 1.0, where fl denotes the computed value. EMIN (output) INTEGER The minimum exponent before (gradual) underflow occurs. RMIN (output) DOUBLE PRECISION The smallest normalized number for the machine, given by BASE**( EMIN - 1 ), where BASE is the floating point value of BETA. EMAX (output) INTEGER The maximum exponent before overflow occurs. RMAX (output) DOUBLE PRECISION The largest positive number for the machine, given by BASE**EMAX * ( 1 - EPS ), where BASE is the floating point value of BETA. Further Details =============== The computation of EPS is based on a routine PARANOIA by W. Kahan of the University of California at Berkeley.
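As an illustration (values stated here for reference, not part of the original comment): on an IEEE double precision machine this routine is expected to report BETA = 2, T = 53, EMIN = -1021, RMIN = 2**(-1022), EMAX = 1024 and RMAX = (1 - 2**(-53)) * 2**1024.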
===================================================================== */ /* Initialized data */ static logical first = TRUE_; static logical iwarn = FALSE_; /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3, d__4, d__5; /* Builtin functions */ double pow_di(doublereal *, integer *); /* Local variables */ static logical ieee; static doublereal half; static logical lrnd; static doublereal leps, zero, a, b, c; static integer i, lbeta; static doublereal rbase; static integer lemin, lemax, gnmin; static doublereal small; static integer gpmin; static doublereal third, lrmin, lrmax, sixth; extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *, logical *); extern doublereal dlamc3_(doublereal *, doublereal *); static logical lieee1; extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *), dlamc5_(integer *, integer *, integer *, logical *, integer *, doublereal *); static integer lt, ngnmin, ngpmin; static doublereal one, two; if (first) { first = FALSE_; zero = 0.; one = 1.; two = 2.; /* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values of BETA, T, RND, EPS, EMIN and RMIN. Throughout this routine we use the function DLAMC3 to ensure that relevant values are stored and not held in registers, or are not affected by optimizers. DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. */ dlamc1_(&lbeta, &lt, &lrnd, &lieee1); /* Start to find EPS. */ b = (doublereal) lbeta; i__1 = -lt; a = pow_di(&b, &i__1); leps = a; /* Try some tricks to see whether or not this is the correct EPS. */ b = two / 3; half = one / 2; d__1 = -half; sixth = dlamc3_(&b, &d__1); third = dlamc3_(&sixth, &sixth); d__1 = -half; b = dlamc3_(&third, &d__1); b = dlamc3_(&b, &sixth); b = abs(b); if (b < leps) { b = leps; } leps = 1.; /* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */ L10: if (leps > b && b > zero) { leps = b; d__1 = half * leps; /* Computing 5th power */ d__3 = two, d__4 = d__3, d__3 *= d__3; /* Computing 2nd power */ d__5 = leps; d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5); c = dlamc3_(&d__1, &d__2); d__1 = -c; c = dlamc3_(&half, &d__1); b = dlamc3_(&half, &c); d__1 = -b; c = dlamc3_(&half, &d__1); b = dlamc3_(&half, &c); goto L10; } /* + END WHILE */ if (a < leps) { leps = a; } /* Computation of EPS complete. Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3)). Keep dividing A by BETA until (gradual) underflow occurs. This is detected when we cannot recover the previous A.
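(Concretely, dlamc4_ is called below with the four start values +1, -1, +(1 + beta**(-3)) and -(1 + beta**(-3)); comparing the four exponents it returns distinguishes gradual underflow, abrupt underflow and twos-complement anomalies.)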
*/ rbase = one / lbeta; small = one; for (i = 1; i <= 3; ++i) { d__1 = small * rbase; small = dlamc3_(&d__1, &zero); /* L20: */ } a = dlamc3_(&one, &small); dlamc4_(&ngpmin, &one, &lbeta); d__1 = -one; dlamc4_(&ngnmin, &d__1, &lbeta); dlamc4_(&gpmin, &a, &lbeta); d__1 = -a; dlamc4_(&gnmin, &d__1, &lbeta); ieee = FALSE_; if (ngpmin == ngnmin && gpmin == gnmin) { if (ngpmin == gpmin) { lemin = ngpmin; /* ( Non twos-complement machines, no gradual underflow; e.g., VAX ) */ } else if (gpmin - ngpmin == 3) { lemin = ngpmin - 1 + lt; ieee = TRUE_; /* ( Non twos-complement machines, with gradual underflow; e.g., IEEE standard followers ) */ } else { lemin = min(ngpmin,gpmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else if (ngpmin == gpmin && ngnmin == gnmin) { if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { lemin = max(ngpmin,ngnmin); /* ( Twos-complement machines, no gradual underflow; e.g., CYBER 205 ) */ } else { lemin = min(ngpmin,ngnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin) { if (gpmin - min(ngpmin,ngnmin) == 3) { lemin = max(ngpmin,ngnmin) - 1 + lt; /* ( Twos-complement machines with gradual underflow; no known machine ) */ } else { lemin = min(ngpmin,ngnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else { /* Computing MIN */ i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin); lemin = min(i__1,gnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } /* ** Comment out this if block if EMIN is ok */ if (iwarn) { first = TRUE_; printf("\n\n WARNING. The value EMIN may be incorrect:- "); printf("EMIN = %8i\n",lemin); printf("If, after inspection, the value EMIN looks acceptable"); printf("please comment out \n the IF block as marked within the"); printf("code of routine DLAMC2, \n otherwise supply EMIN"); printf("explicitly.\n"); } /* ** Assume IEEE arithmetic if we found denormalised numbers above, or if arithmetic seems to round in the IEEE style, determined in routine DLAMC1. A true IEEE machine should have both things true; however, faulty machines may have one or the other. */ ieee = ieee || lieee1; /* Compute RMIN by successive division by BETA. We could compute RMIN as BASE**( EMIN - 1 ), but some machines underflow during this computation. */ lrmin = 1.; i__1 = 1 - lemin; for (i = 1; i <= 1-lemin; ++i) { d__1 = lrmin * rbase; lrmin = dlamc3_(&d__1, &zero); /* L30: */ } /* Finally, call DLAMC5 to compute EMAX and RMAX. */ dlamc5_(&lbeta, &lt, &lemin, &ieee, &lemax, &lrmax); } *beta = lbeta; *t = lt; *rnd = lrnd; *eps = leps; *emin = lemin; *rmin = lrmin; *emax = lemax; *rmax = lrmax; return 0; /* End of DLAMC2 */ } /* dlamc2_ */ #endif doublereal dlamc3_(doublereal *a, doublereal *b) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC3 is intended to force A and B to be stored prior to doing the addition of A and B, for use in situations where optimizers might hold one of these in a register. Arguments ========= A, B (input) DOUBLE PRECISION The values A and B.
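(The C translation below achieves this with a volatile return value: an expression written as dlamc3_(&a, &b) is forced through a stored double, whereas a + b written inline could legally be held in a wider register by an optimizing compiler, defeating the environmental enquiries above.)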
===================================================================== */ /* >>Start of File<< System generated locals */ volatile doublereal ret_val; ret_val = *a + *b; return ret_val; /* End of DLAMC3 */ } /* dlamc3_ */ #ifndef HAVE_CONFIG /* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base) { /* -- LAPACK auxiliary routine (version 2.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC4 is a service routine for DLAMC2. Arguments ========= EMIN (output) EMIN The minimum exponent before (gradual) underflow, computed by setting A = START and dividing by BASE until the previous A can not be recovered. START (input) DOUBLE PRECISION The starting point for determining EMIN. BASE (input) INTEGER The base of the machine. ===================================================================== */ /* System generated locals */ integer i__1; doublereal d__1; /* Local variables */ static doublereal zero, a; static integer i; static doublereal rbase, b1, b2, c1, c2, d1, d2; extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal one; a = *start; one = 1.; rbase = one / *base; zero = 0.; *emin = 1; d__1 = a * rbase; b1 = dlamc3_(&d__1, &zero); c1 = a; c2 = a; d1 = a; d2 = a; /* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ L10: if (c1 == a && c2 == a && d1 == a && d2 == a) { --(*emin); a = b1; d__1 = a / *base; b1 = dlamc3_(&d__1, &zero); d__1 = b1 * *base; c1 = dlamc3_(&d__1, &zero); d1 = zero; i__1 = *base; for (i = 1; i <= *base; ++i) { d1 += b1; /* L20: */ } d__1 = a * rbase; b2 = dlamc3_(&d__1, &zero); d__1 = b2 / rbase; c2 = dlamc3_(&d__1, &zero); d2 = zero; i__1 = *base; for (i = 1; i <= *base; ++i) { d2 += b2; /* L30: */ } goto L10; } /* + END WHILE */ return 0; /* End of DLAMC4 */ } /* dlamc4_ */ /* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin, logical *ieee, integer *emax, doublereal *rmax) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC5 attempts to compute RMAX, the largest machine floating-point number, without overflow. It assumes that EMAX + abs(EMIN) sum approximately to a power of 2. It will fail on machines where this assumption does not hold, for example, the Cyber 205 (EMIN = -28625, EMAX = 28718). It will also fail if the value supplied for EMIN is too large (i.e. too close to zero), probably with overflow. Arguments ========= BETA (input) INTEGER The base of floating-point arithmetic. P (input) INTEGER The number of base BETA digits in the mantissa of a floating-point value. EMIN (input) INTEGER The minimum exponent before (gradual) underflow. IEEE (input) LOGICAL A logical flag specifying whether or not the arithmetic system is thought to comply with the IEEE standard. EMAX (output) INTEGER The largest exponent before overflow RMAX (output) DOUBLE PRECISION The largest machine floating-point number. ===================================================================== First compute LEXP and UEXP, two powers of 2 that bound abs(EMIN). We then assume that EMAX + abs(EMIN) will sum approximately to the bound that is closest to abs(EMIN). (EMAX is the exponent of the required number RMAX). 
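As a worked illustration (not part of the original comment): for IEEE double precision EMIN = -1021, so the loop below yields LEXP = 512 and UEXP = 1024, hence EXPSUM = 2048 and a provisional EMAX = 2048 + EMIN - 1 = 1026; the implicit mantissa bit and the exponent reserved for infinities and NaNs then each subtract one, giving the familiar EMAX = 1024.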
*/ /* Table of constant values */ static doublereal c_b5 = 0.; /* System generated locals */ integer i__1; doublereal d__1; /* Local variables */ static integer lexp; static doublereal oldy; static integer uexp, i; static doublereal y, z; static integer nbits; extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal recbas; static integer exbits, expsum, try__; lexp = 1; exbits = 1; L10: try__ = lexp << 1; if (try__ <= -(*emin)) { lexp = try__; ++exbits; goto L10; } if (lexp == -(*emin)) { uexp = lexp; } else { uexp = try__; ++exbits; } /* Now -LEXP is less than or equal to EMIN, and -UEXP is greater than or equal to EMIN. EXBITS is the number of bits needed to store the exponent. */ if (uexp + *emin > -lexp - *emin) { expsum = lexp << 1; } else { expsum = uexp << 1; } /* EXPSUM is the exponent range, approximately equal to EMAX - EMIN + 1 . */ *emax = expsum + *emin - 1; nbits = exbits + 1 + *p; /* NBITS is the total number of bits needed to store a floating-point number. */ if (nbits % 2 == 1 && *beta == 2) { /* Either there are an odd number of bits used to store a floating-point number, which is unlikely, or some bits are not used in the representation of numbers, which is possible, (e.g. Cray machines) or the mantissa has an implicit bit, (e.g. IEEE machines, Dec Vax machines), which is perhaps the most likely. We have to assume the last alternative. If this is true, then we need to reduce EMAX by one because there must be some way of representing zero in an implicit-bit system. On machines like Cray, we are reducing EMAX by one unnecessarily. */ --(*emax); } if (*ieee) { /* Assume we are on an IEEE machine which reserves one exponent for infinity and NaN. */ --(*emax); } /* Now create RMAX, the largest machine number, which should be equal to (1.0 - BETA**(-P)) * BETA**EMAX . First compute 1.0 - BETA**(-P), being careful that the result is less than 1.0 . */ recbas = 1. / *beta; z = *beta - 1.; y = 0.; i__1 = *p; for (i = 1; i <= *p; ++i) { z *= recbas; if (y < 1.) { oldy = y; } y = dlamc3_(&y, &z); /* L20: */ } if (y >= 1.) { y = oldy; } /* Now multiply by BETA**EMAX to get RMAX. */ i__1 = *emax; for (i = 1; i <= *emax; ++i) { d__1 = y * *beta; y = dlamc3_(&d__1, &c_b5); /* L30: */ } *rmax = y; return 0; /* End of DLAMC5 */ } /* dlamc5_ */ #endif nipy-0.6.1/lib/lapack_lite/dlapack_lite.c000066400000000000000000043446771470056100100203140ustar00rootroot00000000000000/* NOTE: This is generated code. Look in Misc/lapack_lite for information on remaking this file.
*/ #include "f2c.h" #ifdef HAVE_CONFIG #include "config.h" #else extern doublereal dlamch_(char *); #define EPSILON dlamch_("Epsilon") #define SAFEMINIMUM dlamch_("Safe minimum") #define PRECISION dlamch_("Precision") #define BASE dlamch_("Base") #endif extern doublereal dlapy2_(doublereal *x, doublereal *y); /* Table of constant values */ static integer c__9 = 9; static integer c__0 = 0; static doublereal c_b15 = 1.; static integer c__1 = 1; static doublereal c_b29 = 0.; static doublereal c_b94 = -.125; static doublereal c_b151 = -1.; static integer c_n1 = -1; static integer c__3 = 3; static integer c__2 = 2; static integer c__65 = 65; static integer c__6 = 6; static integer c__12 = 12; static integer c__49 = 49; static integer c__4 = 4; static logical c_false = FALSE_; static integer c__13 = 13; static integer c__15 = 15; static integer c__14 = 14; static integer c__16 = 16; static logical c_true = TRUE_; static integer c__10 = 10; static integer c__11 = 11; static doublereal c_b3176 = 2.; static real c_b4270 = 0.f; static real c_b4271 = 1.f; /* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal * d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer * iwork, integer *info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double d_sign(doublereal *, doublereal *), log(doublereal); /* Local variables */ static integer difl, difr, ierr, perm, mlvl, sqre, i__, j, k; static doublereal p, r__; static integer z__; extern logical lsame_(char *, char *); extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer * , doublereal *, integer *), dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer poles, iuplo, nsize, start; extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer ic, ii, kk; static doublereal cs; extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer is, iu; static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static integer givcol; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); static integer icompq; static doublereal orgnrm; static integer givnum, givptr, nm1, qstart, smlsiz, wstart, smlszp; static doublereal eps; static integer ivt; /* -- LAPACK 
routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DBDSDC computes the singular value decomposition (SVD) of a real N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, using a divide and conquer method, where S is a diagonal matrix with non-negative diagonal elements (the singular values of B), and U and VT are orthogonal matrices of left and right singular vectors, respectively. DBDSDC can be used to compute all singular values, and optionally, singular vectors or singular vectors in compact form. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. See DLASD3 for details. The code currently calls DLASDQ if singular values only are desired. However, it can be slightly modified to compute singular values using the divide and conquer method. Arguments ========= UPLO (input) CHARACTER*1 = 'U': B is upper bidiagonal. = 'L': B is lower bidiagonal. COMPQ (input) CHARACTER*1 Specifies whether singular vectors are to be computed as follows: = 'N': Compute singular values only; = 'P': Compute singular values and compute singular vectors in compact form; = 'I': Compute singular values and singular vectors. N (input) INTEGER The order of the matrix B. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the bidiagonal matrix B. On exit, if INFO=0, the singular values of B. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the elements of E contain the offdiagonal elements of the bidiagonal matrix whose SVD is desired. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension (LDU,N) If COMPQ = 'I', then: On exit, if INFO = 0, U contains the left singular vectors of the bidiagonal matrix. For other values of COMPQ, U is not referenced. LDU (input) INTEGER The leading dimension of the array U. LDU >= 1. If singular vectors are desired, then LDU >= max( 1, N ). VT (output) DOUBLE PRECISION array, dimension (LDVT,N) If COMPQ = 'I', then: On exit, if INFO = 0, VT' contains the right singular vectors of the bidiagonal matrix. For other values of COMPQ, VT is not referenced. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= 1. If singular vectors are desired, then LDVT >= max( 1, N ). Q (output) DOUBLE PRECISION array, dimension (LDQ) If COMPQ = 'P', then: On exit, if INFO = 0, Q and IQ contain the left and right singular vectors in a compact form, requiring O(N log N) space instead of 2*N**2. In particular, Q contains all the DOUBLE PRECISION data in LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1)))) words of memory, where SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25). For other values of COMPQ, Q is not referenced. IQ (output) INTEGER array, dimension (LDIQ) If COMPQ = 'P', then: On exit, if INFO = 0, Q and IQ contain the left and right singular vectors in a compact form, requiring O(N log N) space instead of 2*N**2. 
In particular, IQ contains all INTEGER data in LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1)))) words of memory, where SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25). For other values of COMPQ, IQ is not referenced. WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) If COMPQ = 'N' then LWORK >= (4 * N). If COMPQ = 'P' then LWORK >= (6 * N). If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N). IWORK (workspace) INTEGER array, dimension (8*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute a singular value. The update process of divide and conquer failed. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Changed dimension statement in comment describing E from (N) to (N-1). Sven, 17 Feb 05. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --q; --iq; --work; --iwork; /* Function Body */ *info = 0; iuplo = 0; if (lsame_(uplo, "U")) { iuplo = 1; } if (lsame_(uplo, "L")) { iuplo = 2; } if (lsame_(compq, "N")) { icompq = 0; } else if (lsame_(compq, "P")) { icompq = 1; } else if (lsame_(compq, "I")) { icompq = 2; } else { icompq = -1; } if (iuplo == 0) { *info = -1; } else if (icompq < 0) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ldu < 1 || icompq == 2 && *ldu < *n) { *info = -7; } else if (*ldvt < 1 || icompq == 2 && *ldvt < *n) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DBDSDC", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); if (*n == 1) { if (icompq == 1) { q[1] = d_sign(&c_b15, &d__[1]); q[smlsiz * *n + 1] = 1.; } else if (icompq == 2) { u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]); vt[vt_dim1 + 1] = 1.; } d__[1] = abs(d__[1]); return 0; } nm1 = *n - 1; /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left */ wstart = 1; qstart = 3; if (icompq == 1) { dcopy_(n, &d__[1], &c__1, &q[1], &c__1); i__1 = *n - 1; dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1); } if (iuplo == 2) { qstart = 5; wstart = (*n << 1) - 1; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (icompq == 1) { q[i__ + (*n << 1)] = cs; q[i__ + *n * 3] = sn; } else if (icompq == 2) { work[i__] = cs; work[nm1 + i__] = -sn; } /* L10: */ } } /* If ICOMPQ = 0, use DLASDQ to compute the singular values. */ if (icompq == 0) { dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ wstart], info); goto L40; } /* If N is smaller than the minimum divide size SMLSIZ, then solve the problem with another solver.
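(For such small problems the O(N**2) bidiagonal QR routine DLASDQ is applied directly, either to U and VT themselves when COMPQ = 'I', or to scratch space carved out of Q when COMPQ = 'P'.)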
*/ if (*n <= smlsiz) { if (icompq == 2) { dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset] , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ wstart], info); } else if (icompq == 1) { iu = 1; ivt = iu + *n; dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n); dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n); dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + ( qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[ iu + (qstart - 1) * *n], n, &work[wstart], info); } goto L40; } if (icompq == 2) { dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); } /* Scale. */ orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { return 0; } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, & ierr); eps = EPSILON; mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) / log(2.)) + 1; smlszp = smlsiz + 1; if (icompq == 1) { iu = 1; ivt = smlsiz + 1; difl = ivt + smlszp; difr = difl + mlvl; z__ = difr + (mlvl << 1); ic = z__ + mlvl; is = ic + 1; poles = is + 1; givnum = poles + (mlvl << 1); k = 1; givptr = 2; perm = 3; givcol = perm + mlvl; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) < eps) { d__[i__] = d_sign(&eps, &d__[i__]); } /* L20: */ } start = 1; sqre = 0; i__1 = nm1; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { /* Subproblem found. First determine its size and then apply divide and conquer on it. */ if (i__ < nm1) { /* A subproblem with E(I) small for I < NM1. */ nsize = i__ - start + 1; } else if ((d__1 = e[i__], abs(d__1)) >= eps) { /* A subproblem with E(NM1) not too small but I = NM1. */ nsize = *n - start + 1; } else { /* A subproblem with E(NM1) small. This implies a 1-by-1 subproblem at D(N). Solve this 1-by-1 problem first.
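(A trailing 1-by-1 subproblem only requires moving the sign of D(N) into the corresponding singular vector entry, which is what the d_sign assignments below do before the divide and conquer call handles the leading block.)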
*/ nsize = i__ - start + 1; if (icompq == 2) { u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]); vt[*n + *n * vt_dim1] = 1.; } else if (icompq == 1) { q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]); q[*n + (smlsiz + qstart - 1) * *n] = 1.; } d__[*n] = (d__1 = d__[*n], abs(d__1)); } if (icompq == 2) { dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start + start * u_dim1], ldu, &vt[start + start * vt_dim1], ldvt, &smlsiz, &iwork[1], &work[wstart], info); } else { dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[ start], &q[start + (iu + qstart - 2) * *n], n, &q[ start + (ivt + qstart - 2) * *n], &iq[start + k * *n], &q[start + (difl + qstart - 2) * *n], &q[start + ( difr + qstart - 2) * *n], &q[start + (z__ + qstart - 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[ start + givptr * *n], &iq[start + givcol * *n], n, & iq[start + perm * *n], &q[start + (givnum + qstart - 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[ start + (is + qstart - 2) * *n], &work[wstart], & iwork[1], info); if (*info != 0) { return 0; } } start = i__ + 1; } /* L30: */ } /* Unscale */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr); L40: /* Use Selection Sort to minimize swaps of singular vectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; kk = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] > p) { kk = j; p = d__[j]; } /* L50: */ } if (kk != i__) { d__[kk] = d__[i__]; d__[i__] = p; if (icompq == 1) { iq[i__] = kk; } else if (icompq == 2) { dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], & c__1); dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt); } } else if (icompq == 1) { iq[i__] = i__; } /* L60: */ } /* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */ if (icompq == 1) { if (iuplo == 1) { iq[*n] = 1; } else { iq[*n] = 0; } } /* If B is lower bidiagonal, update U by those Givens rotations which rotated B to be upper bidiagonal */ if (iuplo == 2 && icompq == 2) { dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu); } return 0; /* End of DBDSDC */ } /* dbdsdc_ */ /* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer * nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt, integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer * ldc, doublereal *work, integer *info) { /* System generated locals */ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign( doublereal *, doublereal *); /* Local variables */ static doublereal abse; static integer idir; static doublereal abss; static integer oldm; static doublereal cosl; static integer isub, iter; static doublereal unfl, sinl, cosr, smin, smax, sinr; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *), dlas2_( doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal f, g, h__; static integer i__, j, m; static doublereal r__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); static doublereal oldcs; extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static integer oldll; static doublereal shift, sigmn, oldsn; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, 
integer *); static integer maxit; static doublereal sminl, sigmx; static logical lower; extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *, doublereal *, integer *), dlasv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal cs; static integer ll; static doublereal sn, mu; extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(char *, integer *); static doublereal sminoa, thresh; static logical rotate; static integer nm1; static doublereal tolmul; static integer nm12, nm13, lll; static doublereal eps, sll, tol; /* -- LAPACK routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. January 2007 Purpose ======= DBDSQR computes the singular values and, optionally, the right and/or left singular vectors from the singular value decomposition (SVD) of a real N-by-N (upper or lower) bidiagonal matrix B using the implicit zero-shift QR algorithm. The SVD of B has the form B = Q * S * P**T where S is the diagonal matrix of singular values, Q is an orthogonal matrix of left singular vectors, and P is an orthogonal matrix of right singular vectors. If left singular vectors are requested, this subroutine actually returns U*Q instead of Q, and, if right singular vectors are requested, this subroutine returns P**T*VT instead of P**T, for given real input matrices U and VT. When U and VT are the orthogonal matrices that reduce a general matrix A to bidiagonal form: A = U*B*VT, as computed by DGEBRD, then A = (U*Q) * S * (P**T*VT) is the SVD of A. Optionally, the subroutine may also compute Q**T*C for a given real input matrix C. See "Computing Small Singular Values of Bidiagonal Matrices With Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, no. 5, pp. 873-912, Sept 1990) and "Accurate singular values and differential qd algorithms," by B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics Department, University of California at Berkeley, July 1992 for a detailed description of the algorithm. Arguments ========= UPLO (input) CHARACTER*1 = 'U': B is upper bidiagonal; = 'L': B is lower bidiagonal. N (input) INTEGER The order of the matrix B. N >= 0. NCVT (input) INTEGER The number of columns of the matrix VT. NCVT >= 0. NRU (input) INTEGER The number of rows of the matrix U. NRU >= 0. NCC (input) INTEGER The number of columns of the matrix C. NCC >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the bidiagonal matrix B. On exit, if INFO=0, the singular values of B in decreasing order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the N-1 offdiagonal elements of the bidiagonal matrix B. On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E will contain the diagonal and superdiagonal elements of a bidiagonal matrix orthogonally equivalent to the one given as input. VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) On entry, an N-by-NCVT matrix VT. On exit, VT is overwritten by P**T * VT. Not referenced if NCVT = 0. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. U (input/output) DOUBLE PRECISION array, dimension (LDU, N) On entry, an NRU-by-N matrix U. On exit, U is overwritten by U * Q. Not referenced if NRU = 0. 
LDU (input) INTEGER The leading dimension of the array U. LDU >= max(1,NRU). C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) On entry, an N-by-NCC matrix C. On exit, C is overwritten by Q**T * C. Not referenced if NCC = 0. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. WORK (workspace) DOUBLE PRECISION array, dimension (2*N) if NCVT = NRU = NCC = 0, (max(1, 4*N)) otherwise INFO (output) INTEGER = 0: successful exit < 0: If INFO = -i, the i-th argument had an illegal value > 0: the algorithm did not converge; D and E contain the elements of a bidiagonal matrix which is orthogonally similar to the input matrix B; if INFO = i, i elements of E have not converged to zero. Internal Parameters =================== TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8))) TOLMUL controls the convergence criterion of the QR loop. If it is positive, TOLMUL*EPS is the desired relative precision in the computed singular values. If it is negative, abs(TOLMUL*EPS*sigma_max) is the desired absolute accuracy in the computed singular values (corresponds to relative accuracy abs(TOLMUL*EPS) in the largest singular value. abs(TOLMUL) should be between 1 and 1/EPS, and preferably between 10 (for fast convergence) and .1/EPS (for there to be some accuracy in the results). Default is to lose at either one eighth or 2 of the available decimal digits in each computed singular value (whichever is smaller). MAXITR INTEGER, default = 6 MAXITR controls the maximum number of passes of the algorithm through its inner loop. The algorithm stops (and so fails to converge) if the number of passes through the inner loop exceeds MAXITR*N**2. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; lower = lsame_(uplo, "L"); if (! lsame_(uplo, "U") && ! lower) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ncvt < 0) { *info = -3; } else if (*nru < 0) { *info = -4; } else if (*ncc < 0) { *info = -5; } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) { *info = -9; } else if (*ldu < max(1,*nru)) { *info = -11; } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) { *info = -13; } if (*info != 0) { i__1 = -(*info); xerbla_("DBDSQR", &i__1); return 0; } if (*n == 0) { return 0; } if (*n == 1) { goto L160; } /* ROTATE is true if any singular vectors desired, false otherwise */ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; /* If no singular vectors desired, use qd algorithm */ if (!
rotate) { dlasq1_(n, &d__[1], &e[1], &work[1], info); return 0; } nm1 = *n - 1; nm12 = nm1 + nm1; nm13 = nm12 + nm1; idir = 0; /* Get machine constants */ eps = EPSILON; unfl = SAFEMINIMUM; /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left */ if (lower) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; work[i__] = cs; work[nm1 + i__] = sn; /* L10: */ } /* Update singular vectors if desired */ if (*nru > 0) { dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], ldu); } if (*ncc > 0) { dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset], ldc); } } /* Compute singular values to relative accuracy TOL (By setting TOL to be negative, algorithm will compute singular values to absolute accuracy ABS(TOL)*norm(input matrix)) Computing MAX Computing MIN */ d__3 = 100., d__4 = pow_dd(&eps, &c_b94); d__1 = 10., d__2 = min(d__3,d__4); tolmul = max(d__1,d__2); tol = tolmul * eps; /* Compute approximate maximum, minimum singular values */ smax = 0.; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1)); smax = max(d__2,d__3); /* L20: */ } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1)); smax = max(d__2,d__3); /* L30: */ } sminl = 0.; if (tol >= 0.) { /* Relative accuracy desired */ sminoa = abs(d__[1]); if (sminoa == 0.) { goto L50; } mu = sminoa; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1] , abs(d__1)))); sminoa = min(sminoa,mu); if (sminoa == 0.) { goto L50; } /* L40: */ } L50: sminoa /= sqrt((doublereal) (*n)); /* Computing MAX */ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl; thresh = max(d__1,d__2); } else { /* Absolute accuracy desired Computing MAX */ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl; thresh = max(d__1,d__2); } /* Prepare for main iteration loop for the singular values (MAXIT is the maximum number of passes through the inner loop permitted before nonconvergence signalled.) */ maxit = *n * 6 * *n; iter = 0; oldll = -1; oldm = -1; /* M points to last element of unconverged part of matrix */ m = *n; /* Begin main iteration loop */ L60: /* Check for convergence or exceeding iteration count */ if (m <= 1) { goto L160; } if (iter > maxit) { goto L200; } /* Find diagonal block of matrix to work on */ if (tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh) { d__[m] = 0.; } smax = (d__1 = d__[m], abs(d__1)); smin = smax; i__1 = m - 1; for (lll = 1; lll <= i__1; ++lll) { ll = m - lll; abss = (d__1 = d__[ll], abs(d__1)); abse = (d__1 = e[ll], abs(d__1)); if (tol < 0. 
&& abss <= thresh) { d__[ll] = 0.; } if (abse <= thresh) { goto L80; } smin = min(smin,abss); /* Computing MAX */ d__1 = max(smax,abss); smax = max(d__1,abse); /* L70: */ } ll = 0; goto L90; L80: e[ll] = 0.; /* Matrix splits since E(LL) = 0 */ if (ll == m - 1) { /* Convergence of bottom singular value, return to top of loop */ --m; goto L60; } L90: ++ll; /* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */ if (ll == m - 1) { /* 2 by 2 block, handle separately */ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, &sinl, &cosl); d__[m - 1] = sigmx; e[m - 1] = 0.; d__[m] = sigmn; /* Compute singular vectors, if desired */ if (*ncvt > 0) { drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, & cosr, &sinr); } if (*nru > 0) { drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], & c__1, &cosl, &sinl); } if (*ncc > 0) { drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, & cosl, &sinl); } m += -2; goto L60; } /* If working on new submatrix, choose shift direction (from larger end diagonal element towards smaller) */ if (ll > oldm || m < oldll) { if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) { /* Chase bulge from top (big end) to bottom (small end) */ idir = 1; } else { /* Chase bulge from bottom (big end) to top (small end) */ idir = 2; } } /* Apply convergence tests */ if (idir == 1) { /* Run convergence test in forward direction First apply standard test to bottom of matrix */ if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs( d__1)) || tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh) { e[m - 1] = 0.; goto L60; } if (tol >= 0.) { /* If relative accuracy desired, apply convergence criterion forward */ mu = (d__1 = d__[ll], abs(d__1)); sminl = mu; i__1 = m - 1; for (lll = ll; lll <= i__1; ++lll) { if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { e[lll] = 0.; goto L60; } mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[ lll], abs(d__1)))); sminl = min(sminl,mu); /* L100: */ } } } else { /* Run convergence test in backward direction First apply standard test to top of matrix */ if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1) ) || tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh) { e[ll] = 0.; goto L60; } if (tol >= 0.) { /* If relative accuracy desired, apply convergence criterion backward */ mu = (d__1 = d__[m], abs(d__1)); sminl = mu; i__1 = ll; for (lll = m - 1; lll >= i__1; --lll) { if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { e[lll] = 0.; goto L60; } mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll] , abs(d__1)))); sminl = min(sminl,mu); /* L110: */ } } } oldll = ll; oldm = m; /* Compute shift. First, test if shifting would ruin relative accuracy, and if so set the shift to zero. Computing MAX */ d__1 = eps, d__2 = tol * .01; if (tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2)) { /* Use a zero shift to avoid loss of relative accuracy */ shift = 0.; } else { /* Compute the shift from 2-by-2 block at end of matrix */ if (idir == 1) { sll = (d__1 = d__[ll], abs(d__1)); dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__); } else { sll = (d__1 = d__[m], abs(d__1)); dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__); } /* Test if shift negligible, and if so set to zero */ if (sll > 0.) { /* Computing 2nd power */ d__1 = shift / sll; if (d__1 * d__1 < eps) { shift = 0.; } } } /* Increment iteration count */ iter = iter + m - ll; /* If SHIFT = 0, do simplified QR iteration */ if (shift == 0.) 
{ if (idir == 1) { /* Chase bulge from top to bottom Save cosines and sines for later singular vector updates */ cs = 1.; oldcs = 1.; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { d__1 = d__[i__] * cs; dlartg_(&d__1, &e[i__], &cs, &sn, &r__); if (i__ > ll) { e[i__ - 1] = oldsn * r__; } d__1 = oldcs * r__; d__2 = d__[i__ + 1] * sn; dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); work[i__ - ll + 1] = cs; work[i__ - ll + 1 + nm1] = sn; work[i__ - ll + 1 + nm12] = oldcs; work[i__ - ll + 1 + nm13] = oldsn; /* L120: */ } h__ = d__[m] * cs; d__[m] = h__ * oldcs; e[m - 1] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { e[m - 1] = 0.; } } else { /* Chase bulge from bottom to top Save cosines and sines for later singular vector updates */ cs = 1.; oldcs = 1.; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { d__1 = d__[i__] * cs; dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__); if (i__ < m) { e[i__] = oldsn * r__; } d__1 = oldcs * r__; d__2 = d__[i__ - 1] * sn; dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); work[i__ - ll] = cs; work[i__ - ll + nm1] = -sn; work[i__ - ll + nm12] = oldcs; work[i__ - ll + nm13] = -oldsn; /* L130: */ } h__ = d__[ll] * cs; d__[ll] = h__ * oldcs; e[ll] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[ll], abs(d__1)) <= thresh) { e[ll] = 0.; } } } else { /* Use nonzero shift */ if (idir == 1) { /* Chase bulge from top to bottom Save cosines and sines for later singular vector updates */ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[ ll]) + shift / d__[ll]); g = e[ll]; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { dlartg_(&f, &g, &cosr, &sinr, &r__); if (i__ > ll) { e[i__ - 1] = r__; } f = cosr * d__[i__] + sinr * e[i__]; e[i__] = cosr * e[i__] - sinr * d__[i__]; g = sinr * d__[i__ + 1]; d__[i__ + 1] = cosr * d__[i__ + 1]; dlartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__] + sinl * d__[i__ + 1]; d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__]; if (i__ < m - 1) { g = sinl * e[i__ + 1]; e[i__ + 1] = cosl * e[i__ + 1]; } work[i__ - ll + 1] = cosr; work[i__ - ll + 1 + nm1] = sinr; work[i__ - ll + 1 + nm12] = cosl; work[i__ - ll + 1 + nm13] = sinl; /* L140: */ } e[m - 1] = f; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { e[m - 1] = 0.; } } else { /* Chase bulge 
from bottom to top Save cosines and sines for later singular vector updates */ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[m] ) + shift / d__[m]); g = e[m - 1]; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { dlartg_(&f, &g, &cosr, &sinr, &r__); if (i__ < m) { e[i__] = r__; } f = cosr * d__[i__] + sinr * e[i__ - 1]; e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__]; g = sinr * d__[i__ - 1]; d__[i__ - 1] = cosr * d__[i__ - 1]; dlartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__ - 1] + sinl * d__[i__ - 1]; d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1]; if (i__ > ll + 1) { g = sinl * e[i__ - 2]; e[i__ - 2] = cosl * e[i__ - 2]; } work[i__ - ll] = cosr; work[i__ - ll + nm1] = -sinr; work[i__ - ll + nm12] = cosl; work[i__ - ll + nm13] = -sinl; /* L150: */ } e[ll] = f; /* Test convergence */ if ((d__1 = e[ll], abs(d__1)) <= thresh) { e[ll] = 0.; } /* Update singular vectors if desired */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ ll + c_dim1], ldc); } } } /* QR iteration finished, go back and check convergence */ goto L60; /* All singular values converged, so make them positive */ L160: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] < 0.) { d__[i__] = -d__[i__]; /* Change sign of singular vectors, if desired */ if (*ncvt > 0) { dscal_(ncvt, &c_b151, &vt[i__ + vt_dim1], ldvt); } } /* L170: */ } /* Sort the singular values into decreasing order (insertion sort on singular values, but only one transposition per singular vector) */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Scan for smallest D(I) */ isub = 1; smin = d__[1]; i__2 = *n + 1 - i__; for (j = 2; j <= i__2; ++j) { if (d__[j] <= smin) { isub = j; smin = d__[j]; } /* L180: */ } if (isub != *n + 1 - i__) { /* Swap singular values and vectors */ d__[isub] = d__[*n + 1 - i__]; d__[*n + 1 - i__] = smin; if (*ncvt > 0) { dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ + vt_dim1], ldvt); } if (*nru > 0) { dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) * u_dim1 + 1], &c__1); } if (*ncc > 0) { dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ + c_dim1], ldc); } } /* L190: */ } goto L220; /* Maximum number of iterations exceeded, failure to converge */ L200: *info = 0; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L210: */ } L220: return 0; /* End of DBDSQR */ } /* dbdsqr_ */ /* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo, integer *ihi, doublereal *scale, integer *m, doublereal *v, integer * ldv, integer *info) { /* System generated locals */ integer v_dim1, v_offset, i__1; /* Local variables */ static integer i__, k; static doublereal s; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static logical leftv; static integer ii; extern /* Subroutine */ int xerbla_(char *, integer *); static logical rightv; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DGEBAK forms the right or left eigenvectors of a real general matrix by backward transformation on the computed eigenvectors of the balanced matrix output by DGEBAL. Arguments ========= JOB (input) CHARACTER*1 Specifies the type of backward transformation required: = 'N', do nothing, return immediately; = 'P', do backward transformation for permutation only; = 'S', do backward transformation for scaling only; = 'B', do backward transformations for both permutation and scaling. JOB must be the same as the argument JOB supplied to DGEBAL. SIDE (input) CHARACTER*1 = 'R': V contains right eigenvectors; = 'L': V contains left eigenvectors. N (input) INTEGER The number of rows of the matrix V. N >= 0. ILO (input) INTEGER IHI (input) INTEGER The integers ILO and IHI determined by DGEBAL. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. SCALE (input) DOUBLE PRECISION array, dimension (N) Details of the permutation and scaling factors, as returned by DGEBAL. M (input) INTEGER The number of columns of the matrix V. M >= 0. V (input/output) DOUBLE PRECISION array, dimension (LDV,M) On entry, the matrix of right or left eigenvectors to be transformed, as returned by DHSEIN or DTREVC. On exit, V is overwritten by the transformed eigenvectors. LDV (input) INTEGER The leading dimension of the array V. LDV >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. ===================================================================== Decode and Test the input parameters */ /* Parameter adjustments */ --scale; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; /* Function Body */ rightv = lsame_(side, "R"); leftv = lsame_(side, "L"); *info = 0; if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") && ! lsame_(job, "B")) { *info = -1; } else if (! rightv && ! leftv) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -4; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -5; } else if (*m < 0) { *info = -7; } else if (*ldv < max(1,*n)) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEBAK", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*m == 0) { return 0; } if (lsame_(job, "N")) { return 0; } if (*ilo == *ihi) { goto L30; } /* Backward balance */ if (lsame_(job, "S") || lsame_(job, "B")) { if (rightv) { i__1 = *ihi; for (i__ = *ilo; i__ <= i__1; ++i__) { s = scale[i__]; dscal_(m, &s, &v[i__ + v_dim1], ldv); /* L10: */ } } if (leftv) { i__1 = *ihi; for (i__ = *ilo; i__ <= i__1; ++i__) { s = 1. 
/ scale[i__]; dscal_(m, &s, &v[i__ + v_dim1], ldv); /* L20: */ } } } /* Backward permutation For I = ILO-1 step -1 until 1, IHI+1 step 1 until N do -- */ L30: if (lsame_(job, "P") || lsame_(job, "B")) { if (rightv) { i__1 = *n; for (ii = 1; ii <= i__1; ++ii) { i__ = ii; if (i__ >= *ilo && i__ <= *ihi) { goto L40; } if (i__ < *ilo) { i__ = *ilo - ii; } k = (integer) scale[i__]; if (k == i__) { goto L40; } dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); L40: ; } } if (leftv) { i__1 = *n; for (ii = 1; ii <= i__1; ++ii) { i__ = ii; if (i__ >= *ilo && i__ <= *ihi) { goto L50; } if (i__ < *ilo) { i__ = *ilo - ii; } k = (integer) scale[i__]; if (k == i__) { goto L50; } dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); L50: ; } } } return 0; /* End of DGEBAK */ } /* dgebak_ */ /* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer * lda, integer *ilo, integer *ihi, doublereal *scale, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1, d__2; /* Local variables */ static integer iexc; static doublereal c__, f, g; static integer i__, j, k, l, m; static doublereal r__, s; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal sfmin1, sfmin2, sfmax1, sfmax2, ca, ra; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); static logical noconv; static integer ica, ira; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBAL balances a general real matrix A. This involves, first, permuting A by a similarity transformation to isolate eigenvalues in the first 1 to ILO-1 and last IHI+1 to N elements on the diagonal; and second, applying a diagonal similarity transformation to rows and columns ILO to IHI to make the rows and columns as close in norm as possible. Both steps are optional. Balancing may reduce the 1-norm of the matrix, and improve the accuracy of the computed eigenvalues and/or eigenvectors. Arguments ========= JOB (input) CHARACTER*1 Specifies the operations to be performed on A: = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 for i = 1,...,N; = 'P': permute only; = 'S': scale only; = 'B': both permute and scale. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the input matrix A. On exit, A is overwritten by the balanced matrix. If JOB = 'N', A is not referenced. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). ILO (output) INTEGER IHI (output) INTEGER ILO and IHI are set to integers such that on exit A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. If JOB = 'N' or 'S', ILO = 1 and IHI = N. SCALE (output) DOUBLE PRECISION array, dimension (N) Details of the permutations and scaling factors applied to A. If P(j) is the index of the row and column interchanged with row and column j and D(j) is the scaling factor applied to row and column j, then SCALE(j) = P(j) for j = 1,...,ILO-1 = D(j) for j = ILO,...,IHI = P(j) for j = IHI+1,...,N. The order in which the interchanges are made is N to IHI+1, then 1 to ILO-1. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. 
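Example ======= An illustrative pairing of DGEBAL with DGEBAK (an editorial sketch, not part of the reference LAPACK text; it assumes f2c-style types as used throughout this file, a column-major N-by-N array a, and M eigenvector columns v with leading dimension ldv computed from the balanced matrix):

    dgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
    ... compute eigenvectors v of the balanced matrix ...
    dgebak_("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);

The DGEEV driver later in this file performs exactly this sequence, balancing with JOB = 'B' and undoing the balancing on the computed eigenvectors with DGEBAK.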
Further Details =============== The permutations consist of row and column interchanges which put the matrix in the form ( T1 X Y ) P A P = ( 0 B Z ) ( 0 0 T2 ) where T1 and T2 are upper triangular matrices whose eigenvalues lie along the diagonal. The column indices ILO and IHI mark the starting and ending columns of the submatrix B. Balancing consists of applying a diagonal similarity transformation inv(D) * B * D to make the 1-norms of each row of B and its corresponding column nearly equal. The output matrix is ( T1 X*D Y ) ( 0 inv(D)*B*D inv(D)*Z ). ( 0 0 T2 ) Information about the permutations P and the diagonal matrix D is returned in the vector SCALE. This subroutine is based on the EISPACK routine BALANC. Modified by Tzu-Yi Chen, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --scale; /* Function Body */ *info = 0; if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") && ! lsame_(job, "B")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEBAL", &i__1); return 0; } k = 1; l = *n; if (*n == 0) { goto L210; } if (lsame_(job, "N")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { scale[i__] = 1.; /* L10: */ } goto L210; } if (lsame_(job, "S")) { goto L120; } /* Permutation to isolate eigenvalues if possible */ goto L50; /* Row and column exchange. */ L20: scale[m] = (doublereal) j; if (j == m) { goto L30; } dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1); i__1 = *n - k + 1; dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda); L30: switch (iexc) { case 1: goto L40; case 2: goto L80; } /* Search for rows isolating an eigenvalue and push them down. */ L40: if (l == 1) { goto L210; } --l; L50: for (j = l; j >= 1; --j) { i__1 = l; for (i__ = 1; i__ <= i__1; ++i__) { if (i__ == j) { goto L60; } if (a[j + i__ * a_dim1] != 0.) { goto L70; } L60: ; } m = l; iexc = 1; goto L20; L70: ; } goto L90; /* Search for columns isolating an eigenvalue and push them left. */ L80: ++k; L90: i__1 = l; for (j = k; j <= i__1; ++j) { i__2 = l; for (i__ = k; i__ <= i__2; ++i__) { if (i__ == j) { goto L100; } if (a[i__ + j * a_dim1] != 0.) { goto L110; } L100: ; } m = k; iexc = 2; goto L20; L110: ; } L120: i__1 = l; for (i__ = k; i__ <= i__1; ++i__) { scale[i__] = 1.; /* L130: */ } if (lsame_(job, "P")) { goto L210; } /* Balance the submatrix in rows K to L. Iterative loop for norm reduction */ sfmin1 = SAFEMINIMUM / PRECISION; sfmax1 = 1. / sfmin1; sfmin2 = sfmin1 * 2.; sfmax2 = 1. / sfmin2; L140: noconv = FALSE_; i__1 = l; for (i__ = k; i__ <= i__1; ++i__) { c__ = 0.; r__ = 0.; i__2 = l; for (j = k; j <= i__2; ++j) { if (j == i__) { goto L150; } c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1)); r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1)); L150: ; } ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1); ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1)); i__2 = *n - k + 1; ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda); ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1)); /* Guard against zero C or R due to underflow. */ if (c__ == 0. || r__ == 0.) 
{ goto L200; } g = r__ / 2.; f = 1.; s = c__ + r__; L160: /* Computing MAX */ d__1 = max(f,c__); /* Computing MIN */ d__2 = min(r__,g); if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) { goto L170; } f *= 2.; c__ *= 2.; ca *= 2.; r__ /= 2.; g /= 2.; ra /= 2.; goto L160; L170: g = c__ / 2.; L180: /* Computing MIN */ d__1 = min(f,c__), d__1 = min(d__1,g); if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) { goto L190; } f /= 2.; c__ /= 2.; g /= 2.; ca /= 2.; r__ *= 2.; ra *= 2.; goto L180; /* Now balance. */ L190: if (c__ + r__ >= s * .95) { goto L200; } if (f < 1. && scale[i__] < 1.) { if (f * scale[i__] <= sfmin1) { goto L200; } } if (f > 1. && scale[i__] > 1.) { if (scale[i__] >= sfmax1 / f) { goto L200; } } g = 1. / f; scale[i__] *= f; noconv = TRUE_; i__2 = *n - k + 1; dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda); dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1); L200: ; } if (noconv) { goto L140; } L210: *ilo = k; *ihi = l; return 0; /* End of DGEBAL */ } /* dgebal_ */ /* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * taup, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBD2 reduces a real general m by n matrix A to upper or lower bidiagonal form B by an orthogonal transformation: Q' * A * P = B. If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. Arguments ========= M (input) INTEGER The number of rows in the matrix A. M >= 0. N (input) INTEGER The number of columns in the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n general matrix to be reduced. On exit, if m >= n, the diagonal and the first superdiagonal are overwritten with the upper bidiagonal matrix B; the elements below the diagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the first superdiagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors; if m < n, the diagonal and the first subdiagonal are overwritten with the lower bidiagonal matrix B; the elements below the first subdiagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the diagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (min(M,N)) The diagonal elements of the bidiagonal matrix B: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) The off-diagonal elements of the bidiagonal matrix B: if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. 
TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: If m >= n, Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The contents of A on exit are illustrated by the following examples: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) ( v1 v2 v3 v4 v5 ) where d and e denote diagonal and off-diagonal elements of B, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info < 0) { i__1 = -(*info); xerbla_("DGEBD2", &i__1); return 0; } if (*m >= *n) { /* Reduce to upper bidiagonal form */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); d__[i__] = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; /* Apply H(i) to A(i:m,i+1:n) from the left */ if (i__ < *n) { i__2 = *m - i__ + 1; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, & tauq[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1] ); } a[i__ + i__ * a_dim1] = d__[i__]; if (i__ < *n) { /* Generate elementary reflector G(i) to annihilate A(i,i+2:n) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( i__3,*n) * a_dim1], lda, &taup[i__]); e[i__] = a[i__ + (i__ + 1) * a_dim1]; a[i__ + (i__ + 1) * a_dim1] = 1.; /* Apply G(i) to A(i+1:m,i+1:n) from the right */ i__2 = *m - i__; i__3 = *n - i__; dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + (i__ + 1) * a_dim1] = e[i__]; } else { taup[i__] = 0.; } /* L10: */ } } else { /* Reduce to lower bidiagonal form */ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector G(i) to annihilate A(i,i+1:n) 
*/ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1], lda, &taup[i__]); d__[i__] = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; /* Apply G(i) to A(i+1:m,i:n) from the right */ if (i__ < *m) { i__2 = *m - i__; i__3 = *n - i__ + 1; dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, & taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); } a[i__ + i__ * a_dim1] = d__[i__]; if (i__ < *m) { /* Generate elementary reflector H(i) to annihilate A(i+2:m,i) */ i__2 = *m - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Apply H(i) to A(i+1:m,i+1:n) from the left */ i__2 = *m - i__; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], & c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + 1 + i__ * a_dim1] = e[i__]; } else { tauq[i__] = 0.; } /* L20: */ } } return 0; /* End of DGEBD2 */ } /* dgebd2_ */ /* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * taup, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nbmin, iinfo, minmn; extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer nb; extern /* Subroutine */ int dlabrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer nx; static doublereal ws; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwrkx, ldwrky, lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBRD reduces a general real M-by-N matrix A to upper or lower bidiagonal form B by an orthogonal transformation: Q**T * A * P = B. If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. Arguments ========= M (input) INTEGER The number of rows in the matrix A. M >= 0. N (input) INTEGER The number of columns in the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N general matrix to be reduced. 
On exit, if m >= n, the diagonal and the first superdiagonal are overwritten with the upper bidiagonal matrix B; the elements below the diagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the first superdiagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors; if m < n, the diagonal and the first subdiagonal are overwritten with the lower bidiagonal matrix B; the elements below the first subdiagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the diagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (min(M,N)) The diagonal elements of the bidiagonal matrix B: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) The off-diagonal elements of the bidiagonal matrix B: if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The length of the array WORK. LWORK >= max(1,M,N). For optimum performance LWORK >= (M+N)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: If m >= n, Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The contents of A on exit are illustrated by the following examples: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) ( v1 v2 v3 v4 v5 ) where d and e denote diagonal and off-diagonal elements of B, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). 
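Example ======= A minimal workspace-query sketch (an editorial illustration, not part of the reference LAPACK text; m, n, a, lda, d, e, tauq, taup, work and info are assumed to be declared to match the prototype above, with wkopt a doublereal scalar):

    lwork = -1;
    dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &wkopt, &lwork, &info);    query: wkopt receives the optimal LWORK
    lwork = (integer) wkopt;
    ... allocate work of length lwork ...
    dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);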
===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; --work; /* Function Body */ *info = 0; /* Computing MAX */ i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nb = max(i__1,i__2); lwkopt = (*m + *n) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = max(1,*m); if (*lwork < max(i__1,*n) && ! lquery) { *info = -10; } } if (*info < 0) { i__1 = -(*info); xerbla_("DGEBRD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ minmn = min(*m,*n); if (minmn == 0) { work[1] = 1.; return 0; } ws = (doublereal) max(*m,*n); ldwrkx = *m; ldwrky = *n; if (nb > 1 && nb < minmn) { /* Set the crossover point NX. Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); /* Determine when to switch from blocked to unblocked code. */ if (nx < minmn) { ws = (doublereal) ((*m + *n) * nb); if ((doublereal) (*lwork) < ws) { /* Not enough work space for the optimal NB, consider using a smaller block size. */ nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); if (*lwork >= (*m + *n) * nbmin) { nb = *lwork / (*m + *n); } else { nb = 1; nx = minmn; } } } } else { nx = minmn; } i__1 = minmn - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Reduce rows and columns i:i+nb-1 to bidiagonal form and return the matrices X and Y which are needed to update the unreduced part of the matrix */ i__3 = *m - i__ + 1; i__4 = *n - i__ + 1; dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx * nb + 1], &ldwrky); /* Update the trailing submatrix A(i+nb:m,i+nb:n), using an update of the form A := A - V*Y' - X*U' */ i__3 = *m - i__ - nb + 1; i__4 = *n - i__ - nb + 1; dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b151, &a[ i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], & ldwrky, &c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); i__3 = *m - i__ - nb + 1; i__4 = *n - i__ - nb + 1; dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b151, & work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, & c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); /* Copy diagonal and off-diagonal elements of B back into A */ if (*m >= *n) { i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + j * a_dim1] = d__[j]; a[j + (j + 1) * a_dim1] = e[j]; /* L10: */ } } else { i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + j * a_dim1] = d__[j]; a[j + 1 + j * a_dim1] = e[j]; /* L20: */ } } /* L30: */ } /* Use unblocked code to reduce the remainder of the matrix */ i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], & tauq[i__], &taup[i__], &work[1], &iinfo); work[1] = ws; return 0; /* End of DGEBRD */ } /* dgebrd_ */ /* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal * a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl, integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, vl_dim1, vl_offset, 
vr_dim1, vr_offset, i__1, i__2, i__3; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer ibal; static char side[1]; static doublereal anrm; static integer ierr, itau; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer iwrk, nout; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, k; static doublereal r__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern doublereal dlapy2_(doublereal *, doublereal *); extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_( char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dgebal_(char *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static doublereal cs; static logical scalea; static doublereal cscale; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(char *, integer *); static logical select[1]; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dhseqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer minwrk, maxwrk; static logical wantvl; static doublereal smlnum; static integer hswork; static logical lquery, wantvr; static integer ihi; static doublereal scl; static integer ilo; static doublereal dum[1], eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEEV computes for an N-by-N real nonsymmetric matrix A, the eigenvalues and, optionally, the left and/or right eigenvectors. The right eigenvector v(j) of A satisfies A * v(j) = lambda(j) * v(j) where lambda(j) is its eigenvalue. The left eigenvector u(j) of A satisfies u(j)**H * A = lambda(j) * u(j)**H where u(j)**H denotes the conjugate transpose of u(j). The computed eigenvectors are normalized to have Euclidean norm equal to 1 and largest component real. Arguments ========= JOBVL (input) CHARACTER*1 = 'N': left eigenvectors of A are not computed; = 'V': left eigenvectors of A are computed. JOBVR (input) CHARACTER*1 = 'N': right eigenvectors of A are not computed; = 'V': right eigenvectors of A are computed. N (input) INTEGER The order of the matrix A. N >= 0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N matrix A. On exit, A has been overwritten. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) WR and WI contain the real and imaginary parts, respectively, of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part first. VL (output) DOUBLE PRECISION array, dimension (LDVL,N) If JOBVL = 'V', the left eigenvectors u(j) are stored one after another in the columns of VL, in the same order as their eigenvalues. If JOBVL = 'N', VL is not referenced. If the j-th eigenvalue is real, then u(j) = VL(:,j), the j-th column of VL. If the j-th and (j+1)-st eigenvalues form a complex conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and u(j+1) = VL(:,j) - i*VL(:,j+1). LDVL (input) INTEGER The leading dimension of the array VL. LDVL >= 1; if JOBVL = 'V', LDVL >= N. VR (output) DOUBLE PRECISION array, dimension (LDVR,N) If JOBVR = 'V', the right eigenvectors v(j) are stored one after another in the columns of VR, in the same order as their eigenvalues. If JOBVR = 'N', VR is not referenced. If the j-th eigenvalue is real, then v(j) = VR(:,j), the j-th column of VR. If the j-th and (j+1)-st eigenvalues form a complex conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and v(j+1) = VR(:,j) - i*VR(:,j+1). LDVR (input) INTEGER The leading dimension of the array VR. LDVR >= 1; if JOBVR = 'V', LDVR >= N. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,3*N), and if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good performance, LWORK must generally be larger. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = i, the QR algorithm failed to compute all the eigenvalues, and no eigenvectors have been computed; elements i+1:N of WR and WI contain eigenvalues which have converged. ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --wr; --wi; vl_dim1 = *ldvl; vl_offset = 1 + vl_dim1 * 1; vl -= vl_offset; vr_dim1 = *ldvr; vr_offset = 1 + vr_dim1 * 1; vr -= vr_offset; --work; /* Function Body */ *info = 0; lquery = *lwork == -1; wantvl = lsame_(jobvl, "V"); wantvr = lsame_(jobvr, "V"); if (! wantvl && ! lsame_(jobvl, "N")) { *info = -1; } else if (! wantvr && ! lsame_(jobvr, "N")) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldvl < 1 || wantvl && *ldvl < *n) { *info = -9; } else if (*ldvr < 1 || wantvr && *ldvr < *n) { *info = -11; } /* Compute workspace (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV. HSWORK refers to the workspace preferred by DHSEQR, as calculated below. 
HSWORK is computed assuming ILO=1 and IHI=N, the worst case.) */ if (*info == 0) { if (*n == 0) { minwrk = 1; maxwrk = 1; } else { maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1, n, &c__0, (ftnlen)6, (ftnlen)1); if (wantvl) { minwrk = *n << 2; /* Computing MAX */ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen) 1); maxwrk = max(i__1,i__2); dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vl[vl_offset], ldvl, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n << 2; maxwrk = max(i__1,i__2); } else if (wantvr) { minwrk = *n << 2; /* Computing MAX */ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen) 1); maxwrk = max(i__1,i__2); dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n << 2; maxwrk = max(i__1,i__2); } else { minwrk = *n * 3; dhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); } maxwrk = max(maxwrk,minwrk); } work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -13; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGEEV ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Get machine constants */ eps = PRECISION; smlnum = SAFEMINIMUM; bignum = 1. / smlnum; dlabad_(&smlnum, &bignum); smlnum = sqrt(smlnum) / eps; bignum = 1. / smlnum; /* Scale A if max element outside range [SMLNUM,BIGNUM] */ anrm = dlange_("M", n, n, &a[a_offset], lda, dum); scalea = FALSE_; if (anrm > 0. 
&& anrm < smlnum) { scalea = TRUE_; cscale = smlnum; } else if (anrm > bignum) { scalea = TRUE_; cscale = bignum; } if (scalea) { dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & ierr); } /* Balance the matrix (Workspace: need N) */ ibal = 1; dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr); /* Reduce to upper Hessenberg form (Workspace: need 3*N, prefer 2*N+N*NB) */ itau = ibal + *n; iwrk = itau + *n; i__1 = *lwork - iwrk + 1; dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &ierr); if (wantvl) { /* Want left eigenvectors Copy Householder vectors to VL */ *(unsigned char *)side = 'L'; dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl) ; /* Generate orthogonal matrix in VL (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */ i__1 = *lwork - iwrk + 1; dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &i__1, &ierr); /* Perform QR iteration, accumulating Schur vectors in VL (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vl[vl_offset], ldvl, &work[iwrk], &i__1, info); if (wantvr) { /* Want left and right eigenvectors Copy Schur vectors to VR */ *(unsigned char *)side = 'B'; dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr); } } else if (wantvr) { /* Want right eigenvectors Copy Householder vectors to VR */ *(unsigned char *)side = 'R'; dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr) ; /* Generate orthogonal matrix in VR (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */ i__1 = *lwork - iwrk + 1; dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &i__1, &ierr); /* Perform QR iteration, accumulating Schur vectors in VR (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vr[vr_offset], ldvr, &work[iwrk], &i__1, info); } else { /* Compute eigenvalues only (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vr[vr_offset], ldvr, &work[iwrk], &i__1, info); } /* If INFO > 0 from DHSEQR, then quit */ if (*info > 0) { goto L50; } if (wantvl || wantvr) { /* Compute left and/or right eigenvectors (Workspace: need 4*N) */ dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr); } if (wantvl) { /* Undo balancing of left eigenvectors (Workspace: need N) */ dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl, &ierr); /* Normalize left eigenvectors and make largest component real */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (wi[i__] == 0.) { scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); } else if (wi[i__] > 0.) { d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); scl = 1. 
/ dlapy2_(&d__1, &d__2); dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing 2nd power */ d__1 = vl[k + i__ * vl_dim1]; /* Computing 2nd power */ d__2 = vl[k + (i__ + 1) * vl_dim1]; work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; /* L10: */ } k = idamax_(n, &work[iwrk], &c__1); dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1], &cs, &sn, &r__); drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) * vl_dim1 + 1], &c__1, &cs, &sn); vl[k + (i__ + 1) * vl_dim1] = 0.; } /* L20: */ } } if (wantvr) { /* Undo balancing of right eigenvectors (Workspace: need N) */ dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr, &ierr); /* Normalize right eigenvectors and make largest component real */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (wi[i__] == 0.) { scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); } else if (wi[i__] > 0.) { d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); scl = 1. / dlapy2_(&d__1, &d__2); dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing 2nd power */ d__1 = vr[k + i__ * vr_dim1]; /* Computing 2nd power */ d__2 = vr[k + (i__ + 1) * vr_dim1]; work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; /* L30: */ } k = idamax_(n, &work[iwrk], &c__1); dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1], &cs, &sn, &r__); drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) * vr_dim1 + 1], &c__1, &cs, &sn); vr[k + (i__ + 1) * vr_dim1] = 0.; } /* L40: */ } } /* Undo scaling if necessary */ L50: if (scalea) { i__1 = *n - *info; /* Computing MAX */ i__3 = *n - *info; i__2 = max(i__3,1); dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info + 1], &i__2, &ierr); i__1 = *n - *info; /* Computing MAX */ i__3 = *n - *info; i__2 = max(i__3,1); dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info + 1], &i__2, &ierr); if (*info > 0) { i__1 = ilo - 1; dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1], n, &ierr); i__1 = ilo - 1; dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1], n, &ierr); } } work[1] = (doublereal) maxwrk; return 0; /* End of DGEEV */ } /* dgeev_ */ /* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEHD2 reduces a real general matrix A to upper Hessenberg form H by an orthogonal similarity transformation: Q' * A * Q = H . Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that A is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL; otherwise they should be set to 1 and N respectively. See Further Details. 1 <= ILO <= IHI <= max(1,N). 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the n by n general matrix to be reduced. On exit, the upper triangle and the first subdiagonal of A are overwritten with the upper Hessenberg matrix H, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrix Q is represented as a product of (ihi-ilo) elementary reflectors Q = H(ilo) H(ilo+1) . . . H(ihi-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on exit in A(i+2:ihi,i), and tau in TAU(i). The contents of A are illustrated by the following example, with n = 7, ilo = 2 and ihi = 6: on entry, on exit, ( a a a a a a a ) ( a a h h h h a ) ( a a a a a a ) ( a h h h h a ) ( a a a a a a ) ( h h h h h h ) ( a a a a a a ) ( v2 h h h h h ) ( a a a a a a ) ( v2 v3 h h h h ) ( a a a a a a ) ( v2 v3 v4 h h h ) ( a ) ( a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEHD2", &i__1); return 0; } i__1 = *ihi - 1; for (i__ = *ilo; i__ <= i__1; ++i__) { /* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */ i__2 = *ihi - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); aii = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Apply H(i) to A(1:ihi,i+1:ihi) from the right */ i__2 = *ihi - i__; dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]); /* Apply H(i) to A(i+1:ihi,i+1:n) from the left */ i__2 = *ihi - i__; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + 1 + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DGEHD2 */ } /* dgehd2_ */ /* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, j; static doublereal t[4160] /* was [65][64] */; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nbmin, iinfo; extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, integer *, integer *, 
doublereal *, doublereal *, integer *, doublereal *, integer *), daxpy_( integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dgehd2_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlahr2_( integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib; static doublereal ei; static integer nb, nh; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEHRD reduces a real general matrix A to upper Hessenberg form H by an orthogonal similarity transformation: Q' * A * Q = H . Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that A is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL; otherwise they should be set to 1 and N respectively. See Further Details. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N general matrix to be reduced. On exit, the upper triangle and the first subdiagonal of A are overwritten with the upper Hessenberg matrix H, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to zero. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The length of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrix Q is represented as a product of (ihi-ilo) elementary reflectors Q = H(ilo) H(ilo+1) . . . H(ihi-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on exit in A(i+2:ihi,i), and tau in TAU(i). 
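As an illustrative sketch (an editorial addition, not reference LAPACK text), a caller using the f2c conventions of this file would typically obtain the optimal workspace with the LWORK = -1 query described above before the real call:

    integer n = 4, ilo = 1, ihi = 4, lda = 4, lwork = -1, info;
    doublereal a[16], tau[3], wkopt;
    dgehrd_(&n, &ilo, &ihi, a, &lda, tau, &wkopt, &lwork, &info);
    lwork = (integer) wkopt;
    (then allocate LWORK doublereals for WORK and call dgehrd_ again)

The contents of a and tau are placeholders here; only the query mechanism is being illustrated.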
The contents of A are illustrated by the following example, with n = 7, ilo = 2 and ihi = 6: on entry, on exit, ( a a a a a a a ) ( a a h h h h a ) ( a a a a a a ) ( a h h h h a ) ( a a a a a a ) ( h h h h h h ) ( a a a a a a ) ( v2 h h h h h ) ( a a a a a a ) ( v2 v3 h h h h ) ( a a a a a a ) ( v2 v3 v4 h h h ) ( a ) ( a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). This file is a slight modification of LAPACK-3.0's DGEHRD subroutine incorporating improvements proposed by Quintana-Orti and Van de Geijn (2005). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; /* Computing MIN */ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nb = min(i__1,i__2); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*lwork < max(1,*n) && ! lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEHRD", &i__1); return 0; } else if (lquery) { return 0; } /* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */ i__1 = *ilo - 1; for (i__ = 1; i__ <= i__1; ++i__) { tau[i__] = 0.; /* L10: */ } i__1 = *n - 1; for (i__ = max(1,*ihi); i__ <= i__1; ++i__) { tau[i__] = 0.; /* L20: */ } /* Quick return if possible */ nh = *ihi - *ilo + 1; if (nh <= 1) { work[1] = 1.; return 0; } /* Determine the block size Computing MIN */ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nb = min(i__1,i__2); nbmin = 2; iws = 1; if (nb > 1 && nb < nh) { /* Determine when to cross over from blocked to unblocked code (last block is always handled by unblocked code) Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < nh) { /* Determine if workspace is large enough for blocked code */ iws = *n * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: determine the minimum value of NB, and reduce NB or force use of unblocked code Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); if (*lwork >= *n * nbmin) { nb = *lwork / *n; } else { nb = 1; } } } } ldwork = *n; if (nb < nbmin || nb >= nh) { /* Use unblocked code below */ i__ = *ilo; } else { /* Use blocked code */ i__1 = *ihi - 1 - nx; i__2 = nb; for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = nb, i__4 = *ihi - i__; ib = min(i__3,i__4); /* Reduce columns i:i+ib-1 to Hessenberg form, returning the matrices V and T of the block reflector H = I - V*T*V' which performs the reduction, and also the matrix Y = A*V*T */ dlahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, & c__65, &work[1], &ldwork); /* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the right, computing A := A - Y * V'. 
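Here Y = A*V*T is the block returned in WORK by the DLAHR2 call above, and V lives in the just-reduced columns of A, so the update is carried out as a single rank-IB DGEMM; because the unit entry of the last reflector is only implicit in that storage,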
V(i+ib,ib-1) must be set to 1 */ ei = a[i__ + ib + (i__ + ib - 1) * a_dim1]; a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.; i__3 = *ihi - i__ - ib + 1; dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b151, & work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, & c_b15, &a[(i__ + ib) * a_dim1 + 1], lda); a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei; /* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the right */ i__3 = ib - 1; dtrmm_("Right", "Lower", "Transpose", "Unit", &i__, &i__3, &c_b15, &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &ldwork); i__3 = ib - 2; for (j = 0; j <= i__3; ++j) { daxpy_(&i__, &c_b151, &work[ldwork * j + 1], &c__1, &a[(i__ + j + 1) * a_dim1 + 1], &c__1); /* L30: */ } /* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the left */ i__3 = *ihi - i__; i__4 = *n - i__ - ib + 1; dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[ i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork); /* L40: */ } } /* Use unblocked code to reduce the rest of the matrix */ dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo); work[1] = (doublereal) iws; return 0; /* End of DGEHRD */ } /* dgehrd_ */ /* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, k; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELQ2 computes an LQ factorization of a real m by n matrix A: A = L * Q. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the elements on and below the diagonal of the array contain the m by min(m,n) lower trapezoidal matrix L (L is lower triangular if m <= n); the elements above the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (M) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(k) . . . H(2) H(1), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), and tau in TAU(i). 
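Equivalently, the LQ factorization of A is the transpose of a QR factorization of A': L = R', with the reflector vectors stored by rows above the diagonal rather than by columns below it. As an editorial sketch (not reference LAPACK text), factoring a 2-by-3 matrix with this unblocked routine looks like:

    integer m = 2, n = 3, lda = 2, info;
    doublereal a[6] = {1., 4., 2., 5., 3., 6.};   (column-major 2-by-3)
    doublereal tau[2], work[2];
    dgelq2_(&m, &n, a, &lda, tau, work, &info);

after which L occupies the lower triangle of a and the reflector data the strictly upper part, as described above.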
===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGELQ2", &i__1); return 0; } k = min(*m,*n); i__1 = k; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1] , lda, &tau[i__]); if (i__ < *m) { /* Apply H(i) to A(i+1:m,i:n) from the right */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; i__2 = *m - i__; i__3 = *n - i__ + 1; dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); a[i__ + i__ * a_dim1] = aii; } /* L10: */ } return 0; /* End of DGELQ2 */ } /* dgelq2_ */ /* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, k, nbmin, iinfo; extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELQF computes an LQ factorization of a real M-by-N matrix A: A = L * Q. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, the elements on and below the diagonal of the array contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower triangular if m <= n); the elements above the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,M). For optimum performance LWORK >= M*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. 
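Note that the block size NB chosen by ILAENV determines both the optimal LWORK quoted above and, in the body below, the crossover between blocked DLARFT/DLARFB updates and the unblocked DGELQ2 fallback.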
INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(k) . . . H(2) H(1), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); lwkopt = *m * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else if (*lwork < max(1,*m) && ! lquery) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGELQF", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ k = min(*m,*n); if (k == 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *m; if (nb > 1 && nb < k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *m; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < k && nx < k) { /* Use blocked code initially */ i__1 = k - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = k - i__ + 1; ib = min(i__3,nb); /* Compute the LQ factorization of the current block A(i:i+ib-1,i:n) */ i__3 = *n - i__ + 1; dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ 1], &iinfo); if (i__ + ib <= *m) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__3 = *n - i__ + 1; dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H to A(i+ib:m,i:n) from the right */ i__3 = *m - i__ - ib + 1; i__4 = *n - i__ + 1; dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + 1], &ldwork); } /* L10: */ } } else { i__ = 1; } /* Use unblocked code to factor the last or only block. 
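The blocked loop above stops NX columns short of K, so either that remaining tail or, when blocking was not worthwhile, the whole matrix is factored by the single DGELQ2 call here.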
*/ if (i__ <= k) { i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] , &iinfo); } work[1] = (doublereal) iws; return 0; /* End of DGELQF */ } /* dgelqf_ */ /* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal * s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork, integer *iwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; /* Builtin functions */ double log(doublereal); /* Local variables */ static doublereal anrm, bnrm; static integer itau, nlvl, iascl, ibscl; static doublereal sfmin; static integer minmn, maxmn, itaup, itauq, mnthr, nwork; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static integer ie, il; extern /* Subroutine */ int dgebrd_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer mm; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlalsd_(char *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dgeqrf_( integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer wlalsd; extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer ldwork; extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer minwrk, maxwrk; static doublereal smlnum; static logical lquery; static integer smlsiz; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELSD computes the minimum-norm solution to a real linear least squares problem: minimize 2-norm(| b - A*x |) using the singular value decomposition (SVD) of A. A is an M-by-N matrix which may be rank-deficient. Several right hand side vectors b and solution vectors x can be handled in a single call; they are stored as the columns of the M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix X. 
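Because B is overwritten by X, it must be dimensioned with at least max(M,N) rows (see LDB below), even in the underdetermined case M < N.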
The problem is solved in three steps: (1) Reduce the coefficient matrix A to bidiagonal form with Householder transformations, reducing the original problem into a "bidiagonal least squares problem" (BLS) (2) Solve the BLS using a divide and conquer approach. (3) Apply back all the Householder transformations to solve the original least squares problem. The effective rank of A is determined by treating as zero those singular values which are less than RCOND times the largest singular value. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= M (input) INTEGER The number of rows of A. M >= 0. N (input) INTEGER The number of columns of A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrices B and X. NRHS >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, A has been destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the M-by-NRHS right hand side matrix B. On exit, B is overwritten by the N-by-NRHS solution matrix X. If m >= n and RANK = n, the residual sum-of-squares for the solution in the i-th column is given by the sum of squares of elements n+1:m in that column. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,max(M,N)). S (output) DOUBLE PRECISION array, dimension (min(M,N)) The singular values of A in decreasing order. The condition number of A in the 2-norm = S(1)/S(min(m,n)). RCOND (input) DOUBLE PRECISION RCOND is used to determine the effective rank of A. Singular values S(i) <= RCOND*S(1) are treated as zero. If RCOND < 0, machine precision is used instead. RANK (output) INTEGER The effective rank of A, i.e., the number of singular values which are greater than RCOND*S(1). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK must be at least 1. The exact minimum amount of workspace needed depends on M, N and NRHS. As long as LWORK is at least 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, if M is greater than or equal to N or 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, if M is less than N, the code will execute correctly. SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25), and NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) For good performance, LWORK should generally be larger. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, where MINMN = MIN( M,N ). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value.
> 0: the algorithm for computing the SVD failed to converge; if INFO = i, i off-diagonal elements of an intermediate bidiagonal form did not converge to zero. Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input arguments. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; --s; --work; --iwork; /* Function Body */ *info = 0; minmn = min(*m,*n); maxmn = max(*m,*n); mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, ( ftnlen)1); lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*ldb < max(1,maxmn)) { *info = -7; } smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); /* Compute workspace. (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV.) */ minwrk = 1; minmn = max(1,minmn); /* Computing MAX */ i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) / log(2.)) + 1; nlvl = max(i__1,0); if (*info == 0) { maxwrk = 0; mm = *m; if (*m >= *n && *m >= mnthr) { /* Path 1a - overdetermined, with many more rows than columns. */ mm = *n; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2); maxwrk = max(i__1,i__2); } if (*m >= *n) { /* Path 1 - overdetermined or exactly determined. Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD" , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR", "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing 2nd power */ i__1 = smlsiz + 1; wlalsd = *n * 9 + (*n << 1) * smlsiz + (*n << 3) * nlvl + *n * * nrhs + i__1 * i__1; /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + wlalsd; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2), i__2 = *n * 3 + wlalsd; minwrk = max(i__1,i__2); } if (*n > *m) { /* Computing 2nd power */ i__1 = smlsiz + 1; wlalsd = *m * 9 + (*m << 1) * smlsiz + (*m << 3) * nlvl + *m * * nrhs + i__1 * i__1; if (*n >= mnthr) { /* Path 2a - underdetermined, with many more columns than rows. 
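The workspace terms accumulated below mirror the computational Path 2a later in this routine: an initial LQ factorization (DGELQF), bidiagonalization of the square M-by-M factor L (DGEBRD), the DORMBR and DORMLQ multiplications applied to B, and the DLALSD solve.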
*/ maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs * ilaenv_(& c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, (ftnlen)6, ( ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) * ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, ( ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); if (*nrhs > 1) { /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs; maxwrk = max(i__1,i__2); } else { /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 1); maxwrk = max(i__1,i__2); } /* Computing MAX */ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ", "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + wlalsd; maxwrk = max(i__1,i__2); } else { /* Path 2 - remaining underdetermined cases. */ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR" , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + wlalsd; maxwrk = max(i__1,i__2); } /* Computing MAX */ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2), i__2 = *m * 3 + wlalsd; minwrk = max(i__1,i__2); } minwrk = min(minwrk,maxwrk); work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGELSD", &i__1); return 0; } else if (lquery) { goto L10; } /* Quick return if possible. */ if (*m == 0 || *n == 0) { *rank = 0; return 0; } /* Get machine parameters. */ eps = PRECISION; sfmin = SAFEMINIMUM; smlnum = sfmin / eps; bignum = 1. / smlnum; dlabad_(&smlnum, &bignum); /* Scale A if max entry outside range [SMLNUM,BIGNUM]. */ anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]); iascl = 0; if (anrm > 0. && anrm < smlnum) { /* Scale matrix norm up to SMLNUM. */ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, info); iascl = 1; } else if (anrm > bignum) { /* Scale matrix norm down to BIGNUM. */ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, info); iascl = 2; } else if (anrm == 0.) { /* Matrix all zero. Return zero solution. */ i__1 = max(*m,*n); dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); dlaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1); *rank = 0; goto L10; } /* Scale B if max entry outside range [SMLNUM,BIGNUM]. */ bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]); ibscl = 0; if (bnrm > 0. && bnrm < smlnum) { /* Scale matrix norm up to SMLNUM. */ dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb, info); ibscl = 1; } else if (bnrm > bignum) { /* Scale matrix norm down to BIGNUM. */ dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb, info); ibscl = 2; } /* If M < N make sure certain entries of B are zero. */ if (*m < *n) { i__1 = *n - *m; dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb); } /* Overdetermined case. 
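For M >= N the driver first compresses A by a QR factorization when M is much larger than N (M >= MNTHR), bidiagonalizes the resulting N-by-N triangular factor, solves the bidiagonal problem with DLALSD, and finally applies the accumulated orthogonal transformations back to B.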
*/ if (*m >= *n) { /* Path 1 - overdetermined or exactly determined. */ mm = *m; if (*m >= mnthr) { /* Path 1a - overdetermined, with many more rows than columns. */ mm = *n; itau = 1; nwork = itau + *n; /* Compute A=Q*R. (Workspace: need 2*N, prefer N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, info); /* Multiply B by transpose(Q). (Workspace: need N+NRHS, prefer N+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[ b_offset], ldb, &work[nwork], &i__1, info); /* Zero out below R. */ if (*n > 1) { i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], lda); } } ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A. (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors of R. (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. */ dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of R. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], & b[b_offset], ldb, &work[nwork], &i__1, info); } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max( i__1,*nrhs), i__2 = *n - *m * 3, i__1 = max(i__1,i__2); if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,wlalsd)) { /* Path 2a - underdetermined, with many more columns than rows and sufficient workspace for an efficient algorithm. */ ldwork = *m; /* Computing MAX Computing MAX */ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3; i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda + *m + *m * *nrhs, i__1 = max(i__1,i__2), i__2 = (*m << 2) + *m * *lda + wlalsd; if (*lwork >= max(i__1,i__2)) { ldwork = *lda; } itau = 1; nwork = *m + 1; /* Compute A=L*Q. (Workspace: need 2*M, prefer M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, info); il = nwork; /* Copy L to WORK(IL), zeroing out above its diagonal. */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork); i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], & ldwork); ie = il + ldwork * *m; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IL). (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq], &work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors of L. (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. 
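DLALSD carries out the divide and conquer solve on the bidiagonal factor; on return, RANK holds the number of singular values above the RCOND threshold and S holds the singular values themselves.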
*/ dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of L. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Zero out below first M rows of B. */ i__1 = *n - *m; dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb); nwork = itau + *m; /* Multiply transpose(Q) by B. (Workspace: need M+NRHS, prefer M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[ b_offset], ldb, &work[nwork], &i__1, info); } else { /* Path 2 - remaining underdetermined cases. */ ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize A. (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors. (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq] , &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. */ dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of A. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup] , &b[b_offset], ldb, &work[nwork], &i__1, info); } } /* Undo scaling. */ if (iascl == 1) { dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb, info); dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & minmn, info); } else if (iascl == 2) { dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb, info); dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & minmn, info); } if (ibscl == 1) { dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb, info); } else if (ibscl == 2) { dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb, info); } L10: work[1] = (doublereal) maxwrk; return 0; /* End of DGELSD */ } /* dgelsd_ */ /* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, k; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. 
On exit, the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEQR2", &i__1); return 0; } k = min(*m,*n); i__1 = k; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1] , &c__1, &tau[i__]); if (i__ < *n) { /* Apply H(i) to A(i:m,i+1:n) from the left */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; i__2 = *m - i__ + 1; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + i__ * a_dim1] = aii; } /* L10: */ } return 0; /* End of DGEQR2 */ } /* dgeqr2_ */ /* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, k, nbmin, iinfo; extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEQRF computes a QR factorization of a real M-by-N matrix A: A = Q * R. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. 
On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else if (*lwork < max(1,*n) && ! lquery) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEQRF", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ k = min(*m,*n); if (k == 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *n; if (nb > 1 && nb < k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < k && nx < k) { /* Use blocked code initially */ i__1 = k - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = k - i__ + 1; ib = min(i__3,nb); /* Compute the QR factorization of the current block A(i:m,i:i+ib-1) */ i__3 = *m - i__ + 1; dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ 1], &iinfo); if (i__ + ib <= *n) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__3 = *m - i__ + 1; dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H' to A(i:m,i+ib:n) from the left */ i__3 = *m - i__ + 1; i__4 = *n - i__ - ib + 1; dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib + 1], &ldwork); } /* L10: */ } } else { i__ = 1; } /* Use unblocked code to factor the last or only block. */ if (i__ <= k) { i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] , &iinfo); } work[1] = (doublereal) iws; return 0; /* End of DGEQRF */ } /* dgeqrf_ */ /* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal * a, integer *lda, doublereal *s, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, integer *iwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer iscl; static doublereal anrm; static integer idum[1], ierr, itau, i__; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); static integer chunk, minmn, wrkbl, itaup, itauq, mnthr; static logical wntqa; static integer nwork; static logical wntqn, wntqo, wntqs; static integer ie; extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer il; extern /* Subroutine */ int dgebrd_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer ir, bdspac; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); static integer iu; extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dgeqrf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *), dorgbr_(char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt; static doublereal smlnum; static logical wntqas, lquery; static integer 
blk; static doublereal dum[1], eps; static integer ivt; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGESDD computes the singular value decomposition (SVD) of a real M-by-N matrix A, optionally computing the left and right singular vectors. If singular vectors are desired, it uses a divide-and-conquer algorithm. The SVD is written A = U * SIGMA * transpose(V) where SIGMA is an M-by-N matrix which is zero except for its min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA are the singular values of A; they are real and non-negative, and are returned in descending order. The first min(m,n) columns of U and V are the left and right singular vectors of A. Note that the routine returns VT = V**T, not V. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= JOBZ (input) CHARACTER*1 Specifies options for computing all or part of the matrix U: = 'A': all M columns of U and all N rows of V**T are returned in the arrays U and VT; = 'S': the first min(M,N) columns of U and the first min(M,N) rows of V**T are returned in the arrays U and VT; = 'O': If M >= N, the first N columns of U are overwritten on the array A and all rows of V**T are returned in the array VT; otherwise, all columns of U are returned in the array U and the first M rows of V**T are overwritten in the array A; = 'N': no columns of U or rows of V**T are computed. M (input) INTEGER The number of rows of the input matrix A. M >= 0. N (input) INTEGER The number of columns of the input matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, if JOBZ = 'O', A is overwritten with the first N columns of U (the left singular vectors, stored columnwise) if M >= N; A is overwritten with the first M rows of V**T (the right singular vectors, stored rowwise) otherwise. if JOBZ .ne. 'O', the contents of A are destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). S (output) DOUBLE PRECISION array, dimension (min(M,N)) The singular values of A, sorted so that S(i) >= S(i+1). U (output) DOUBLE PRECISION array, dimension (LDU,UCOL) UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; UCOL = min(M,N) if JOBZ = 'S'. If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M orthogonal matrix U; if JOBZ = 'S', U contains the first min(M,N) columns of U (the left singular vectors, stored columnwise); if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. LDU (input) INTEGER The leading dimension of the array U. LDU >= 1; if JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. VT (output) DOUBLE PRECISION array, dimension (LDVT,N) If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the N-by-N orthogonal matrix V**T; if JOBZ = 'S', VT contains the first min(M,N) rows of V**T (the right singular vectors, stored rowwise); if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= 1; if JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; if JOBZ = 'S', LDVT >= min(M,N). 
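In practice JOBZ = 'O' saves one of the two large singular vector arrays by reusing A; whether U or V**T is placed in A depends on whether M >= N, as detailed under JOBZ above.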
WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK; LWORK (input) INTEGER The dimension of the array WORK. LWORK >= 1. If JOBZ = 'N', LWORK >= 3*min(M,N) + max(max(M,N),7*min(M,N)). If JOBZ = 'O', LWORK >= 3*min(M,N)*min(M,N) + max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). If JOBZ = 'S' or 'A' LWORK >= 3*min(M,N)*min(M,N) + max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). For good performance, LWORK should generally be larger. If LWORK = -1 but other input arguments are legal, WORK(1) returns the optimal LWORK. IWORK (workspace) INTEGER array, dimension (8*min(M,N)) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: DBDSDC did not converge, updating process failed. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --s; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --work; --iwork; /* Function Body */ *info = 0; minmn = min(*m,*n); wntqa = lsame_(jobz, "A"); wntqs = lsame_(jobz, "S"); wntqas = wntqa || wntqs; wntqo = lsame_(jobz, "O"); wntqn = lsame_(jobz, "N"); lquery = *lwork == -1; if (! (wntqa || wntqs || wntqo || wntqn)) { *info = -1; } else if (*m < 0) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < * m) { *info = -8; } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn || wntqo && *m >= *n && *ldvt < *n) { *info = -10; } /* Compute workspace (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV.) */ if (*info == 0) { minwrk = 1; maxwrk = 1; if (*m >= *n && minmn > 0) { /* Compute space needed for DBDSDC */ mnthr = (integer) (minmn * 11. 
/ 6.); if (wntqn) { bdspac = *n * 7; } else { bdspac = *n * 3 * *n + (*n << 2); } if (*m >= mnthr) { if (wntqn) { /* Path 1 (M much larger than N, JOBZ='N') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n; maxwrk = max(i__1,i__2); minwrk = bdspac + *n; } else if (wntqo) { /* Path 2 (M much larger than N, JOBZ='O') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + (*n << 1) * *n; minwrk = bdspac + (*n << 1) * *n + *n * 3; } else if (wntqs) { /* Path 3 (M much larger than N, JOBZ='S') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *n * *n; minwrk = bdspac + *n * *n + *n * 3; } else if (wntqa) { /* Path 4 (M much larger than N, JOBZ='A') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR", " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *n * *n; minwrk = bdspac + *n * *n + *n * 3; } } else { /* Path 5 (M at least N, but not much larger) */ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (wntqn) { /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } else if 
(wntqo) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *n; /* Computing MAX */ i__1 = *m, i__2 = *n * *n + bdspac; minwrk = *n * 3 + max(i__1,i__2); } else if (wntqs) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } else if (wntqa) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } } } else if (minmn > 0) { /* Compute space needed for DBDSDC */ mnthr = (integer) (minmn * 11. / 6.); if (wntqn) { bdspac = *m * 7; } else { bdspac = *m * 3 * *m + (*m << 2); } if (*n >= mnthr) { if (wntqn) { /* Path 1t (N much larger than M, JOBZ='N') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m; maxwrk = max(i__1,i__2); minwrk = bdspac + *m; } else if (wntqo) { /* Path 2t (N much larger than M, JOBZ='O') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + (*m << 1) * *m; minwrk = bdspac + (*m << 1) * *m + *m * 3; } else if (wntqs) { /* Path 3t (N much larger than M, JOBZ='S') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX 
*/ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *m; minwrk = bdspac + *m * *m + *m * 3; } else if (wntqa) { /* Path 4t (N much larger than M, JOBZ='A') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ", " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *m; minwrk = bdspac + *m * *m + *m * 3; } } else { /* Path 5t (N greater than M, but not much larger) */ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (wntqn) { /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } else if (wntqo) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *n; /* Computing MAX */ i__1 = *n, i__2 = *m * *m + bdspac; minwrk = *m * 3 + max(i__1,i__2); } else if (wntqs) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } else if (wntqa) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } } } maxwrk = max(maxwrk,minwrk); work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGESDD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Get machine constants */ eps = PRECISION; smlnum = sqrt(SAFEMINIMUM) / eps; bignum = 1. / smlnum; /* Scale A if max element outside range [SMLNUM,BIGNUM] */ anrm = dlange_("M", m, n, &a[a_offset], lda, dum); iscl = 0; if (anrm > 0. 
&& anrm < smlnum) { iscl = 1; dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, & ierr); } else if (anrm > bignum) { iscl = 1; dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, & ierr); } if (*m >= *n) { /* A has at least as many rows as columns. If A has sufficiently more rows than columns, first reduce using the QR decomposition (if sufficient workspace available) */ if (*m >= mnthr) { if (wntqn) { /* Path 1 (M much larger than N, JOBZ='N') No singular vectors to be computed */ itau = 1; nwork = itau + *n; /* Compute A=Q*R (Workspace: need 2*N, prefer N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Zero out below R */ i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], lda); ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A (Workspace: need 4*N, prefer 3*N+2*N*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); nwork = ie + *n; /* Perform bidiagonal SVD, computing singular values only (Workspace: need N+BDSPAC) */ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { /* Path 2 (M much larger than N, JOBZ = 'O') N left singular vectors to be overwritten on A and N right singular vectors to be computed in VT */ ir = 1; /* WORK(IR) is LDWRKR by N */ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) { ldwrkr = *lda; } else { ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n; } itau = ir + ldwrkr * *n; nwork = itau + *n; /* Compute A=Q*R (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Copy R to WORK(IR), zeroing out below it */ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &work[ir + 1], & ldwrkr); /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__1 = *lwork - nwork + 1; dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, &ierr); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in VT, copying result to WORK(IR) (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); /* WORK(IU) is N by N */ iu = nwork; nwork = iu + *n * *n; /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU) and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite WORK(IU) by left singular vectors of R and VT by right singular vectors of R (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ itauq], &work[iu], n, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); /* Multiply Q in A by left singular vectors of R in WORK(IU), storing result in WORK(IR) and copying to A (Workspace: need 2*N*N, prefer N*N+M*N) */ i__1 = *m; i__2 = ldwrkr; for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = *m - i__ + 1; chunk = min(i__3,ldwrkr); dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1], lda, &work[iu], n, &c_b29, &work[ir], &ldwrkr); dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + a_dim1], lda); /* L10: */ } } else if (wntqs) { /* Path 3 (M much larger than N, JOBZ='S') N left singular vectors to be computed in U and N right singular vectors to be computed in VT */ ir = 1; /* WORK(IR) is N by N */ ldwrkr = *n; itau = ir + ldwrkr * *n; nwork = itau + *n; /* Compute A=Q*R (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); /* Copy R to WORK(IR), zeroing out below it */ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); i__2 = *n - 1; i__1 = *n - 1; dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &work[ir + 1], & ldwrkr); /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__2, &ierr); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in WORK(IR) (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of R and VT by right singular vectors of R (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply Q in A by left singular vectors of R in WORK(IR), storing result in U (Workspace: need N*N) */ dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr); dgemm_("N", "N", m, n, n, &c_b15, &a[a_offset], lda, &work[ir] , &ldwrkr, &c_b29, &u[u_offset], ldu); } else if (wntqa) { /* Path 4 (M much larger than N, JOBZ='A') M left singular vectors to be computed in U and N right singular vectors to be computed in VT */ iu = 1; /* WORK(IU) is N by N */ ldwrku = *n; itau = iu + ldwrku * *n; nwork = itau + *n; /* Compute A=Q*R, copying result to U (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); /* Generate Q in U (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork], &i__2, &ierr); /* Produce R in A, zeroing out other entries */ i__2 = *n - 1; i__1 = *n - 1; dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &a[a_dim1 + 2], lda); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU)
and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite WORK(IU) by left singular vectors of R and VT by right singular vectors of R (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply Q in U by left singular vectors of R in WORK(IU), storing result in A (Workspace: need N*N) */ dgemm_("N", "N", m, n, n, &c_b15, &u[u_offset], ldu, &work[iu] , &ldwrku, &c_b29, &a[a_offset], lda); /* Copy left singular vectors of A from A to U */ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); } } else { /* M .LT. MNTHR Path 5 (M at least N, but not much larger) Reduce to bidiagonal form without QR decomposition */ ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize A (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__2, &ierr); if (wntqn) { /* Perform bidiagonal SVD, only computing singular values (Workspace: need N+BDSPAC) */ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { iu = nwork; if (*lwork >= *m * *n + *n * 3 + bdspac) { /* WORK( IU ) is M by N */ ldwrku = *m; nwork = iu + ldwrku * *n; dlaset_("F", m, n, &c_b29, &c_b29, &work[iu], &ldwrku); } else { /* WORK( IU ) is N by N */ ldwrku = *n; nwork = iu + ldwrku * *n; /* WORK(IR) is LDWRKR by N */ ir = nwork; ldwrkr = (*lwork - *n * *n - *n * 3) / *n; } nwork = iu + ldwrku * *n; /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU) and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, & vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[ 1], info); /* Overwrite VT by right singular vectors of A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); if (*lwork >= *m * *n + *n * 3 + bdspac) { /* Overwrite WORK(IU) by left singular vectors of A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & ierr); /* Copy left singular vectors of A from WORK(IU) to A */ dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda); } else { /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], & work[nwork], &i__2, &ierr); /* Multiply Q in A by left singular vectors of bidiagonal matrix in WORK(IU), storing result in WORK(IR) and copying to A (Workspace: need 2*N*N, prefer N*N+M*N) */ i__2 = *m; i__1 = ldwrkr; for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Computing MIN */ i__3 = *m - i__ + 1; chunk = min(i__3,ldwrkr); dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1], lda, &work[iu], &ldwrku, &c_b29, & work[ir], &ldwrkr); dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + a_dim1], lda); /* L20: */ } } } else if (wntqs) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dlaset_("F", m, n, &c_b29, &c_b29, &u[u_offset], ldu); dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 3*N, prefer 2*N+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } else if (wntqa) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dlaset_("F", m, m, &c_b29, &c_b29, &u[u_offset], ldu); dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Set the right corner of U to identity matrix */ if (*m > *n) { i__1 = *m - *n; i__2 = *m - *n; dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &u[*n + 1 + (* n + 1) * u_dim1], ldu); } /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } } } else { /* A has more columns than rows. 
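This branch is the transposed analogue of the M >= N case above, with the LQ factorization A = L*Q playing the role of the QR factorization.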
If A has sufficiently more columns than rows, first reduce using the LQ decomposition (if sufficient workspace available) */ if (*n >= mnthr) { if (wntqn) { /* Path 1t (N much larger than M, JOBZ='N') No singular vectors to be computed */ itau = 1; nwork = itau + *m; /* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Zero out above L */ i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &a[(a_dim1 << 1) + 1], lda); ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in A (Workspace: need 4*M, prefer 3*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); nwork = ie + *m; /* Perform bidiagonal SVD, computing singular values only (Workspace: need M+BDSPAC) */ dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { /* Path 2t (N much larger than M, JOBZ='O') M right singular vectors to be overwritten on A and M left singular vectors to be computed in U */ ivt = 1; /* IVT is M by M */ il = ivt + *m * *m; if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) { /* WORK(IL) is M by N */ ldwrkl = *m; chunk = *n; } else { ldwrkl = *m; chunk = (*lwork - *m * *m) / *m; } itau = il + ldwrkl * *m; nwork = itau + *m; /* Compute A=L*Q (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Copy L to WORK(IL), zeroing out above it */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwrkl], &ldwrkl); /* Generate Q in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__1 = *lwork - nwork + 1; dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, &ierr); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IL) (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U, and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M+M*M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], m, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of L and WORK(IVT) by right singular vectors of L (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr); /* Multiply right singular vectors of L in WORK(IVT) by Q in A, storing result in WORK(IL) and copying to A (Workspace: need 2*M*M, prefer M*M+M*N) */ i__1 = *n; i__2 = chunk; for (i__ = 1; i__2 < 0 ?
i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = *n - i__ + 1; blk = min(i__3,chunk); dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], m, &a[ i__ * a_dim1 + 1], lda, &c_b29, &work[il], & ldwrkl); dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1 + 1], lda); /* L30: */ } } else if (wntqs) { /* Path 3t (N much larger than M, JOBZ='S') M right singular vectors to be computed in VT and M left singular vectors to be computed in U */ il = 1; /* WORK(IL) is M by M */ ldwrkl = *m; itau = il + ldwrkl * *m; nwork = itau + *m; /* Compute A=L*Q (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); /* Copy L to WORK(IL), zeroing out above it */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); i__2 = *m - 1; i__1 = *m - 1; dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &work[il + ldwrkl], &ldwrkl); /* Generate Q in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], &i__2, &ierr); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IL) (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of L and VT by right singular vectors of L (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply right singular vectors of L in WORK(IL) by Q in A, storing result in VT (Workspace: need M*M) */ dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl); dgemm_("N", "N", m, n, m, &c_b15, &work[il], &ldwrkl, &a[ a_offset], lda, &c_b29, &vt[vt_offset], ldvt); } else if (wntqa) { /* Path 4t (N much larger than M, JOBZ='A') N right singular vectors to be computed in VT and M left singular vectors to be computed in U */ ivt = 1; /* WORK(IVT) is M by M */ ldwkvt = *m; itau = ivt + ldwkvt * *m; nwork = itau + *m; /* Compute A=L*Q, copying result to VT (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); /* Generate Q in VT (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[ nwork], &i__2, &ierr); /* Produce L in A, zeroing out other entries */ i__2 = *m - 1; i__1 = *m - 1; dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &a[(a_dim1 << 1) + 1], lda); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in A (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD,
computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M+M*M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] , info); /* Overwrite U by left singular vectors of L and WORK(IVT) by right singular vectors of L (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, & ierr); /* Multiply right singular vectors of L in WORK(IVT) by Q in VT, storing result in A (Workspace: need M*M) */ dgemm_("N", "N", m, n, m, &c_b15, &work[ivt], &ldwkvt, &vt[ vt_offset], ldvt, &c_b29, &a[a_offset], lda); /* Copy right singular vectors of A from A to VT */ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); } } else { /* N .LT. MNTHR Path 5t (N greater than M, but not much larger) Reduce to bidiagonal form without LQ decomposition */ ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize A (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__2, &ierr); if (wntqn) { /* Perform bidiagonal SVD, only computing singular values (Workspace: need M+BDSPAC) */ dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { ldwkvt = *m; ivt = nwork; if (*lwork >= *m * *n + *m * 3 + bdspac) { /* WORK( IVT ) is M by N */ dlaset_("F", m, n, &c_b29, &c_b29, &work[ivt], &ldwkvt); nwork = ivt + ldwkvt * *n; } else { /* WORK( IVT ) is M by M */ nwork = ivt + ldwkvt * *m; il = nwork; /* WORK(IL) is M by CHUNK */ chunk = (*lwork - *m * *m - *m * 3) / *m; } /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M*M+BDSPAC) */ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] , info); /* Overwrite U by left singular vectors of A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); if (*lwork >= *m * *n + *m * 3 + bdspac) { /* Overwrite WORK(IVT) by left singular vectors of A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &ierr); /* Copy right singular vectors of A from WORK(IVT) to A */ dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda); } else { /* Generate P**T in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], & work[nwork], &i__2, &ierr); /* Multiply Q in A by right singular vectors of bidiagonal matrix in WORK(IVT), storing result in WORK(IL) and copying to A (Workspace: need 2*M*M, prefer M*M+M*N) */ i__2 = *n; i__1 = chunk; for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Computing MIN */ i__3 = *n - i__ + 1; blk = min(i__3,chunk); dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], & ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b29, & work[il], m); dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 + 1], lda); /* L40: */ } } } else if (wntqs) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dlaset_("F", m, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 3*M, prefer 2*M+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } else if (wntqa) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dlaset_("F", n, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Set the right corner of VT to identity matrix */ if (*n > *m) { i__1 = *n - *m; i__2 = *n - *m; dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &vt[*m + 1 + (* m + 1) * vt_dim1], ldvt); } /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 2*M+N, prefer 2*M+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } } } /* Undo scaling if necessary */ if (iscl == 1) { if (anrm > bignum) { dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & minmn, &ierr); } if (anrm < smlnum) { dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & minmn, &ierr); } } /* Return optimal workspace in WORK(1) */ work[1] = (doublereal) maxwrk; return 0; /* End of DGESDD */ } /* dgesdd_ */ /* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *ldb, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1; /* Local variables */ extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGESV computes the solution to a real system of linear equations A * X = B, where A is an N-by-N matrix and X and B are N-by-NRHS matrices. The LU decomposition with partial pivoting and row interchanges is used to factor A as A = P * L * U, where P is a permutation matrix, L is unit lower triangular, and U is upper triangular. The factored form of A is then used to solve the system of equations A * X = B. 
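For illustration, a minimal calling sketch (not part of the reference LAPACK documentation; it assumes the f2c-style types and the dgesv_ prototype defined in this file; arrays are stored column-major, as in Fortran):

       integer n = 2, nrhs = 1, info;
       integer ipiv[2];
       doublereal a[4] = { 3., 1., 1., 2. };
       doublereal b[2] = { 9., 8. };
       dgesv_(&n, &nrhs, a, &n, ipiv, b, &n, &info);

   Here a holds the columns of A = [ 3 1; 1 2 ]; on return info == 0 and b holds the solution X = { 2., 3. }.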
Arguments ========= N (input) INTEGER The number of linear equations, i.e., the order of the matrix A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrix B. NRHS >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N coefficient matrix A. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). IPIV (output) INTEGER array, dimension (N) The pivot indices that define the permutation matrix P; row i of the matrix was interchanged with row IPIV(i). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the N-by-NRHS matrix of right hand side matrix B. On exit, if INFO = 0, the N-by-NRHS solution matrix X. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, so the solution could not be computed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*nrhs < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } else if (*ldb < max(1,*n)) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGESV ", &i__1); return 0; } /* Compute the LU factorization of A. */ dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info); if (*info == 0) { /* Solve the system A*X = B, overwriting B with X. */ dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[ b_offset], ldb, info); } return 0; /* End of DGESV */ } /* dgesv_ */ /* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer * lda, integer *ipiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal sfmin; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jp; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix to be factored. 
On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). IPIV (output) INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -k, the k-th argument had an illegal value > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETF2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Compute machine safe minimum */ sfmin = SAFEMINIMUM; i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { /* Find pivot and test for singularity. */ i__2 = *m - j + 1; jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1); ipiv[j] = jp; if (a[jp + j * a_dim1] != 0.) { /* Apply the interchange to columns 1:N. */ if (jp != j) { dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda); } /* Compute elements J+1:M of J-th column. */ if (j < *m) { if ((d__1 = a[j + j * a_dim1], abs(d__1)) >= sfmin) { i__2 = *m - j; d__1 = 1. / a[j + j * a_dim1]; dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); } else { i__2 = *m - j; for (i__ = 1; i__ <= i__2; ++i__) { a[j + i__ + j * a_dim1] /= a[j + j * a_dim1]; /* L20: */ } } } } else if (*info == 0) { *info = j; } if (j < min(*m,*n)) { /* Update trailing submatrix. */ i__2 = *m - j; i__3 = *n - j; dger_(&i__2, &i__3, &c_b151, &a[j + 1 + j * a_dim1], &c__1, &a[j + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda); } /* L10: */ } return 0; /* End of DGETF2 */ } /* dgetf2_ */ /* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer * lda, integer *ipiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iinfo; extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dgetf2_( integer *, integer *, doublereal *, integer *, integer *, integer *); static integer jb, nb; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *, integer *, integer *, integer *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETRF computes an LU factorization of a general M-by-N matrix A using partial pivoting with row interchanges. 
The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). IPIV (output) INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETRF", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Determine the block size for this environment. */ nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); if (nb <= 1 || nb >= min(*m,*n)) { /* Use unblocked code. */ dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info); } else { /* Use blocked code. */ i__1 = min(*m,*n); i__2 = nb; for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* Computing MIN */ i__3 = min(*m,*n) - j + 1; jb = min(i__3,nb); /* Factor diagonal and subdiagonal blocks and test for exact singularity. */ i__3 = *m - j + 1; dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo); /* Adjust INFO and the pivot indices. */ if (*info == 0 && iinfo > 0) { *info = iinfo + j - 1; } /* Computing MIN */ i__4 = *m, i__5 = j + jb - 1; i__3 = min(i__4,i__5); for (i__ = j; i__ <= i__3; ++i__) { ipiv[i__] = j - 1 + ipiv[i__]; /* L10: */ } /* Apply interchanges to columns 1:J-1. */ i__3 = j - 1; i__4 = j + jb - 1; dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1); if (j + jb <= *n) { /* Apply interchanges to columns J+JB:N. */ i__3 = *n - j - jb + 1; i__4 = j + jb - 1; dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, & ipiv[1], &c__1); /* Compute block row of U. */ i__3 = *n - j - jb + 1; dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, & c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda); if (j + jb <= *m) { /* Update trailing submatrix. 
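The DGEMM below realizes the rank-JB update A22 := A22 - A21*A12, where A21 is the just-factored subdiagonal panel and A12 is the block row of U produced by the DTRSM above.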
*/ i__3 = *m - j - jb + 1; i__4 = *n - j - jb + 1; dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb, &c_b151, &a[j + jb + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda, &c_b15, &a[j + jb + (j + jb) * a_dim1], lda); } } /* L20: */ } } return 0; /* End of DGETRF */ } /* dgetrf_ */ /* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs, doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer * ldb, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1; /* Local variables */ extern logical lsame_(char *, char *); extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), xerbla_( char *, integer *), dlaswp_(integer *, doublereal *, integer *, integer *, integer *, integer *, integer *); static logical notran; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETRS solves a system of linear equations A * X = B or A' * X = B with a general N-by-N matrix A using the LU factorization computed by DGETRF. Arguments ========= TRANS (input) CHARACTER*1 Specifies the form of the system of equations: = 'N': A * X = B (No transpose) = 'T': A'* X = B (Transpose) = 'C': A'* X = B (Conjugate transpose = Transpose) N (input) INTEGER The order of the matrix A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrix B. NRHS >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) The factors L and U from the factorization A = P*L*U as computed by DGETRF. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). IPIV (input) INTEGER array, dimension (N) The pivot indices from DGETRF; for 1<=i<=N, row i of the matrix was interchanged with row IPIV(i). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the right hand side matrix B. On exit, the solution matrix X. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ *info = 0; notran = lsame_(trans, "N"); if (! notran && ! lsame_(trans, "T") && ! lsame_( trans, "C")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldb < max(1,*n)) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETRS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { return 0; } if (notran) { /* Solve A * X = B. Apply row interchanges to the right hand sides. */ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1); /* Solve L*X = B, overwriting B with X. */ dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Solve U*X = B, overwriting B with X. */ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b15, & a[a_offset], lda, &b[b_offset], ldb); } else { /* Solve A' * X = B. Solve U'*X = B, overwriting B with X. */ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Solve L'*X = B, overwriting B with X. 
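In this transposed branch the two triangular solves run in the reverse order (U' first, above, then L' below), and the row interchanges are applied to the solution afterwards with increment -1, undoing the pivoting.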
*/ dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Apply row interchanges to the solution vectors. */ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1); } return 0; /* End of DGETRS */ } /* dgetrs_ */ /* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2[2], i__3; doublereal d__1; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static integer kbot, nmin, i__; extern logical lsame_(char *, char *); static logical initz; static doublereal workl[49]; static logical wantt, wantz; extern /* Subroutine */ int dlaqr0_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static doublereal hl[2401] /* was [49][49] */; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static logical lquery; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DHSEQR computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= JOB (input) CHARACTER*1 = 'E': compute eigenvalues only; = 'S': compute eigenvalues and the Schur form T. COMPZ (input) CHARACTER*1 = 'N': no Schur vectors are computed; = 'I': Z is initialized to the unit matrix and the matrix Z of Schur vectors of H is returned; = 'V': Z must contain an orthogonal matrix Q on entry, and the product Q*Z is returned. N (input) INTEGER The order of the matrix H. N .GE. 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise ILO and IHI should be set to 1 and N respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. 
On exit, if INFO = 0 and JOB = 'S', then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) Unlike earlier versions of DHSEQR, this subroutine may explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) The real and imaginary parts, respectively, of the computed eigenvalues. If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) If COMPZ = 'N', Z is not referenced. If COMPZ = 'I', on entry Z need not be set and on exit, if INFO = 0, Z contains the orthogonal matrix Z of the Schur vectors of H. If COMPZ = 'V', on entry Z must contain an N-by-N matrix Q, which is assumed to be equal to the unit matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, if INFO = 0, Z contains Q*Z. Normally Q is the orthogonal matrix generated by DORGHR after the call to DGEHRD which formed the Hessenberg matrix H. (The output value of Z when INFO.GT.0 is given under the description of INFO below.) LDZ (input) INTEGER The leading dimension of the array Z. If COMPZ = 'I' or COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwise, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DHSEQR does a workspace query. In this case, DHSEQR checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. INFO (output) INTEGER = 0: successful exit .LT. 0: if INFO = -i, the i-th argument had an illegal value .GT. 0: if INFO = i, DHSEQR failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 0 and JOB = 'E', then on exit, the remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and JOB = 'S', then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT.
0 and COMPZ = 'V', then on exit (final value of Z) = (initial value of Z)*U where U is the orthogonal matrix in (*) (regardless of the value of JOB.) If INFO .GT. 0 and COMPZ = 'I', then on exit (final value of Z) = U where U is the orthogonal matrix in (*) (regardless of the value of JOB.) If INFO .GT. 0 and COMPZ = 'N', then Z is not accessed. ================================================================ Default values supplied by ILAENV(ISPEC,'DHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). It is suggested that these defaults be adjusted in order to attain best performance in each particular computational environment. ISPEC=1: The DLAHQR vs DLAQR0 crossover point. Default: 75. (Must be at least 11.) ISPEC=2: Recommended deflation window size. This depends on ILO, IHI and NS. NS is the number of simultaneous shifts returned by ILAENV(ISPEC=4). (See ISPEC=4 below.) The default for (IHI-ILO+1).LE.500 is NS. The default for (IHI-ILO+1).GT.500 is 3*NS/2. ISPEC=3: Nibble crossover point. (See ILAENV for details.) Default: 14% of deflation window size. ISPEC=4: Number of simultaneous shifts, NS, in a multi-shift QR iteration. If IHI-ILO+1 is ...

       greater than      ...but less      ... the
       or equal to       than             default is
             1                30          NS =   2(+)
            30                60          NS =   4(+)
            60               150          NS =  10(+)
           150               590          NS =  **
           590              3000          NS =  64
          3000              6000          NS = 128
          6000          infinity          NS = 256

   (+) By default some or all matrices of this order are passed to the implicit double shift routine DLAHQR and NS is ignored. See ISPEC=1 above and comments in IPARM for details. The asterisks (**) indicate an ad-hoc function of N increasing from 10 to 64. ISPEC=5: Select structured matrix multiply. (See ILAENV for details.) Default: 3. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== NL allocates some local workspace to help small matrices . through a rare DLAHQR failure. NL .GT. NTINY = 11 is . required and NL .LE. NMIN = ILAENV(ISPEC=1,...) is recommended. (The default value of NMIN is 75.) Using NL = 49 . allows up to six simultaneous shifts and a 16-by-16 . deflation window. ==== ==== Decode and check the input parameters. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ wantt = lsame_(job, "S"); initz = lsame_(compz, "I"); wantz = initz || lsame_(compz, "V"); work[1] = (doublereal) max(1,*n); lquery = *lwork == -1; *info = 0; if (! lsame_(job, "E") && ! wantt) { *info = -1; } else if (! lsame_(compz, "N") && !
wantz) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -4; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -5; } else if (*ldh < max(1,*n)) { *info = -7; } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) { *info = -11; } else if (*lwork < max(1,*n) && ! lquery) { *info = -13; } if (*info != 0) { /* ==== Quick return in case of invalid argument. ==== */ i__1 = -(*info); xerbla_("DHSEQR", &i__1); return 0; } else if (*n == 0) { /* ==== Quick return in case N = 0; nothing to do. ==== */ return 0; } else if (lquery) { /* ==== Quick return in case of a workspace query ==== */ dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[ 1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); /* ==== Ensure reported workspace size is backward-compatible with . previous LAPACK versions. ==== Computing MAX */ d__1 = (doublereal) max(1,*n); work[1] = max(d__1,work[1]); return 0; } else { /* ==== copy eigenvalues isolated by DGEBAL ==== */ i__1 = *ilo - 1; for (i__ = 1; i__ <= i__1; ++i__) { wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; /* L10: */ } i__1 = *n; for (i__ = *ihi + 1; i__ <= i__1; ++i__) { wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; /* L20: */ } /* ==== Initialize Z, if requested ==== */ if (initz) { dlaset_("A", n, n, &c_b29, &c_b15, &z__[z_offset], ldz) ; } /* ==== Quick return if possible ==== */ if (*ilo == *ihi) { wr[*ilo] = h__[*ilo + *ilo * h_dim1]; wi[*ilo] = 0.; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== Writing concatenation */ i__2[0] = 1, a__1[0] = job; i__2[1] = 1, a__1[1] = compz; s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2); nmin = ilaenv_(&c__12, "DHSEQR", ch__1, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nmin = max(11,nmin); /* ==== DLAQR0 for big matrices; DLAHQR for small ones ==== */ if (*n > nmin) { dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); } else { /* ==== Small matrix ==== */ dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, info); if (*info > 0) { /* ==== A rare DLAHQR failure! DLAQR0 sometimes succeeds . when DLAHQR fails. ==== */ kbot = *info; if (*n >= 49) { /* ==== Larger matrices have enough subdiagonal scratch . space to call DLAQR0 directly. ==== */ dlaqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); } else { /* ==== Tiny matrices don't have enough subdiagonal . scratch space to benefit from DLAQR0. Hence, . tiny matrices must be copied into a larger . array before calling DLAQR0. ==== */ dlacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49); hl[*n + 1 + *n * 49 - 50] = 0.; i__1 = 49 - *n; dlaset_("A", &c__49, &i__1, &c_b29, &c_b29, &hl[(*n + 1) * 49 - 49], &c__49); dlaqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, & wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, workl, &c__49, info); if (wantt || *info != 0) { dlacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh); } } } } /* ==== Clear out the trash, if necessary. ==== */ if ((wantt || *info != 0) && *n > 2) { i__1 = *n - 2; i__3 = *n - 2; dlaset_("L", &i__1, &i__3, &c_b29, &c_b29, &h__[h_dim1 + 3], ldh); } /* ==== Ensure reported workspace size is backward-compatible with . previous LAPACK versions. 
==== Computing MAX */ d__1 = (doublereal) max(1,*n); work[1] = max(d__1,work[1]); } /* ==== End of DHSEQR ==== */ return 0; } /* dhseqr_ */ /* Subroutine */ int dlabad_(doublereal *small, doublereal *large) { /* Builtin functions */ double d_lg10(doublereal *), sqrt(doublereal); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLABAD takes as input the values computed by DLAMCH for underflow and overflow, and returns the square root of each of these values if the log of LARGE is sufficiently large. This subroutine is intended to identify machines with a large exponent range, such as the Crays, and redefine the underflow and overflow limits to be the square roots of the values computed by DLAMCH. This subroutine is needed because DLAMCH does not compensate for poor arithmetic in the upper half of the exponent range, as is found on a Cray. Arguments ========= SMALL (input/output) DOUBLE PRECISION On entry, the underflow threshold as computed by DLAMCH. On exit, if LOG10(LARGE) is sufficiently large, the square root of SMALL, otherwise unchanged. LARGE (input/output) DOUBLE PRECISION On entry, the overflow threshold as computed by DLAMCH. On exit, if LOG10(LARGE) is sufficiently large, the square root of LARGE, otherwise unchanged. ===================================================================== If it looks like we're on a Cray, take the square root of SMALL and LARGE to avoid overflow and underflow problems. */ if (d_lg10(large) > 2e3) { *small = sqrt(*small); *large = sqrt(*large); } return 0; /* End of DLABAD */ } /* dlabad_ */ /* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal * a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer *ldy) { /* System generated locals */ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLABRD reduces the first NB rows and columns of a real general m by n matrix A to upper or lower bidiagonal form by an orthogonal transformation Q' * A * P, and returns the matrices X and Y which are needed to apply the transformation to the unreduced part of A. If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower bidiagonal form. This is an auxiliary routine called by DGEBRD Arguments ========= M (input) INTEGER The number of rows in the matrix A. N (input) INTEGER The number of columns in the matrix A. NB (input) INTEGER The number of leading rows and columns of A to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n general matrix to be reduced. On exit, the first NB rows and columns of the matrix are overwritten; the rest of the array is unchanged. 
If m >= n, elements on and below the diagonal in the first NB columns, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors; and elements above the diagonal in the first NB rows, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. If m < n, elements below the diagonal in the first NB columns, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and elements on and above the diagonal in the first NB rows, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (NB) The diagonal elements of the first NB rows and columns of the reduced matrix. D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (NB) The off-diagonal elements of the first NB rows and columns of the reduced matrix. TAUQ (output) DOUBLE PRECISION array dimension (NB) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. TAUP (output) DOUBLE PRECISION array, dimension (NB) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. X (output) DOUBLE PRECISION array, dimension (LDX,NB) The m-by-nb matrix X required to update the unreduced part of A. LDX (input) INTEGER The leading dimension of the array X. LDX >= M. Y (output) DOUBLE PRECISION array, dimension (LDY,NB) The n-by-nb matrix Y required to update the unreduced part of A. LDY (input) INTEGER The leading dimension of the array Y. LDY >= N. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors. If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The elements of the vectors v and u together form the m-by-nb matrix V and the nb-by-n matrix U' which are needed, with X and Y, to apply the transformation to the unreduced part of the matrix, using a block update of the form: A := A - V*Y' - X*U'. The contents of A on exit are illustrated by the following examples with nb = 2: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) ( v1 v2 a a a ) ( v1 1 a a a a ) ( v1 v2 a a a ) ( v1 v2 a a a a ) ( v1 v2 a a a ) ( v1 v2 a a a a ) ( v1 v2 a a a ) where a denotes an element of the original matrix which is unchanged, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). 
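    Illustration of how the factors are consumed (a sketch of the caller,
    not code in this routine): after DLABRD returns, DGEBRD updates the
    trailing submatrix A(nb+1:m,nb+1:n) with two rank-nb products,
    A := A - V*Y' followed by A := A - X*U', each a single DGEMM call,
    before reducing the next panel.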
===================================================================== Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1 * 1; y -= y_offset; /* Function Body */ if (*m <= 0 || *n <= 0) { return 0; } if (*m >= *n) { /* Reduce to upper bidiagonal form */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:m,i) */ i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[i__ + i__ * a_dim1], &c__1); /* Generate reflection Q(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); d__[i__] = a[i__ + i__ * a_dim1]; if (i__ < *n) { a[i__ + i__ * a_dim1] = 1.; /* Compute Y(i+1:n,i) */ i__2 = *m - i__ + 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + (i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &x[i__ + x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * y_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); /* Update A(i,i+1:n) */ i__2 = *n - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &y[i__ + 1 + y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[ i__ + (i__ + 1) * a_dim1], lda); /* Generate reflection P(i) to annihilate A(i,i+2:n) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( i__3,*n) * a_dim1], lda, &taup[i__]); e[i__] = a[i__ + (i__ + 1) * a_dim1]; a[i__ + (i__ + 1) * a_dim1] = 1.; /* Compute X(i+1:m,i) */ i__2 = *m - i__; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + ( i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[i__ + 1 + i__ * x_dim1], &c__1); i__2 = *n - i__; dgemv_("Transpose", &i__2, &i__, &c_b15, &y[i__ + 1 + y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[ i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &a[i__ + 1 + a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b29, &x[i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", 
&i__2, &i__3, &c_b151, &x[i__ + 1 + x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = *m - i__; dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); } /* L10: */ } } else { /* Reduce to lower bidiagonal form */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i,i:n) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] , lda); i__2 = i__ - 1; i__3 = *n - i__ + 1; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[i__ * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[i__ + i__ * a_dim1] , lda); /* Generate reflection P(i) to annihilate A(i,i+1:n) */ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1], lda, &taup[i__]); d__[i__] = a[i__ + i__ * a_dim1]; if (i__ < *m) { a[i__ + i__ * a_dim1] = 1.; /* Compute X(i+1:m,i) */ i__2 = *m - i__; i__3 = *n - i__ + 1; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + i__ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, & x[i__ + 1 + i__ * x_dim1], &c__1); i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &y[i__ + y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__ + 1; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ * a_dim1 + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[ i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 + x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = *m - i__; dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); /* Update A(i+1:m,i) */ i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *m - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &x[i__ + 1 + x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[ i__ + 1 + i__ * a_dim1], &c__1); /* Generate reflection Q(i) to annihilate A(i+2:m,i) */ i__2 = *m - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute Y(i+1:n,i) */ i__2 = *m - i__; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__; dgemv_("Transpose", &i__2, &i__, &c_b15, &x[i__ + 1 + x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; dgemv_("Transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * 
y_dim1], &c__1); } /* L20: */ } } return 0; /* End of DLABRD */ } /* dlabrd_ */ /* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal * a, integer *lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLACPY copies all or part of a two-dimensional matrix A to another matrix B. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be copied to B. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of the matrix A M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (output) DOUBLE PRECISION array, dimension (LDB,N) On exit, B = A in the locations specified by UPLO. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,M). ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = min(j,*m); for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L10: */ } /* L20: */ } } else if (lsame_(uplo, "L")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L30: */ } /* L40: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L50: */ } /* L60: */ } } return 0; /* End of DLACPY */ } /* dlacpy_ */ /* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__, doublereal *d__, doublereal *p, doublereal *q) { static doublereal e, f; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLADIV performs complex division in real arithmetic a + i*b p + i*q = --------- c + i*d The algorithm is due to Robert L. Smith and can be found in D. Knuth, The art of Computer Programming, Vol.2, p.195 Arguments ========= A (input) DOUBLE PRECISION B (input) DOUBLE PRECISION C (input) DOUBLE PRECISION D (input) DOUBLE PRECISION The scalars a, b, c, and d in the above expression. P (output) DOUBLE PRECISION Q (output) DOUBLE PRECISION The scalars p and q in the above expression. 
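    Worked example (informal, exact arithmetic): for
    (1 + 2i)/(3 + 4i) we have abs(d) >= abs(c), so e = c/d = 0.75 and
    f = d + c*e = 6.25, giving p = (b + a*e)/f = 0.44 and
    q = (-a + b*e)/f = 0.08, i.e. 0.44 + 0.08i.  Computing e and f first
    is what lets the routine avoid the overflow that forming
    c**2 + d**2 directly could cause.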
===================================================================== */ if (abs(*d__) < abs(*c__)) { e = *d__ / *c__; f = *c__ + *d__ * e; *p = (*a + *b * e) / f; *q = (*b - *a * e) / f; } else { e = *c__ / *d__; f = *d__ + *c__ * e; *p = (*b + *a * e) / f; *q = (-(*a) + *b * e) / f; } return 0; /* End of DLADIV */ } /* dladiv_ */ /* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *rt1, doublereal *rt2) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal acmn, acmx, ab, df, tb, sm, rt, adf; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix [ A B ] [ B C ]. On return, RT1 is the eigenvalue of larger absolute value, and RT2 is the eigenvalue of smaller absolute value. Arguments ========= A (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. B (input) DOUBLE PRECISION The (1,2) and (2,1) elements of the 2-by-2 matrix. C (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. RT1 (output) DOUBLE PRECISION The eigenvalue of larger absolute value. RT2 (output) DOUBLE PRECISION The eigenvalue of smaller absolute value. Further Details =============== RT1 is accurate to a few ulps barring over/underflow. RT2 may be inaccurate if there is massive cancellation in the determinant A*C-B*B; higher precision or correctly rounded or correctly truncated arithmetic would be needed to compute RT2 accurately in all cases. Overflow is possible only if RT1 is within a factor of 5 of overflow. Underflow is harmless if the input data is 0 or exceeds underflow_threshold / macheps. ===================================================================== Compute the eigenvalues */ sm = *a + *c__; df = *a - *c__; adf = abs(df); tb = *b + *b; ab = abs(tb); if (abs(*a) > abs(*c__)) { acmx = *a; acmn = *c__; } else { acmx = *c__; acmn = *a; } if (adf > ab) { /* Computing 2nd power */ d__1 = ab / adf; rt = adf * sqrt(d__1 * d__1 + 1.); } else if (adf < ab) { /* Computing 2nd power */ d__1 = adf / ab; rt = ab * sqrt(d__1 * d__1 + 1.); } else { /* Includes case AB=ADF=0 */ rt = ab * sqrt(2.); } if (sm < 0.) { *rt1 = (sm - rt) * .5; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. */ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else if (sm > 0.) { *rt1 = (sm + rt) * .5; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. 
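       (The expression on the next line is algebraically
       (A*C - B*B)/RT1, the determinant over the larger eigenvalue;
       dividing ACMX and B by RT1 before multiplying limits intermediate
       overflow, but the cancellation in the determinant itself remains.)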
*/ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else { /* Includes case RT1 = RT2 = 0 */ *rt1 = rt * .5; *rt2 = rt * -.5; } return 0; /* End of DLAE2 */ } /* dlae2_ */ /* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n, doublereal *d__, doublereal *e, doublereal *q, integer *ldq, doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double log(doublereal); integer pow_ii(integer *, integer *); /* Local variables */ static doublereal temp; static integer curr, i__, j, k; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iperm; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer indxq, iwrem; extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer iqptr; extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *); static integer tlvls, iq; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static integer igivcl; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer igivnm, submat, curprb, subpbs, igivpt; extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer curlvl, matsiz, iprmpt, smlsiz, lgn, msd2, smm1, spm1, spm2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED0 computes all eigenvalues and corresponding eigenvectors of a symmetric tridiagonal matrix using the divide and conquer method. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. = 2: Compute eigenvalues and eigenvectors of tridiagonal matrix. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the main diagonal of the tridiagonal matrix. On exit, its eigenvalues. E (input) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, Q must contain an N-by-N orthogonal matrix. If ICOMPQ = 0 Q is not referenced. If ICOMPQ = 1 On entry, Q is a subset of the columns of the orthogonal matrix used to reduce the full matrix to tridiagonal form corresponding to the subset of the full matrix which is being decomposed at this time. If ICOMPQ = 2 On entry, Q will be the identity matrix. 
On exit, Q contains the eigenvectors of the tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. If eigenvectors are desired, then LDQ >= max(1,N). In any case, LDQ >= 1. QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N) Referenced only when ICOMPQ = 1. Used to store parts of the eigenvector matrix when the updating matrix multiplies take place. LDQS (input) INTEGER The leading dimension of the array QSTORE. If ICOMPQ = 1, then LDQS >= max(1,N). In any case, LDQS >= 1. WORK (workspace) DOUBLE PRECISION array, If ICOMPQ = 0 or 1, the dimension of WORK must be at least 1 + 3*N + 2*N*lg N + 2*N**2 ( lg( N ) = smallest integer k such that 2^k >= N ) If ICOMPQ = 2, the dimension of WORK must be at least 4*N + N**2. IWORK (workspace) INTEGER array, If ICOMPQ = 0 or 1, the dimension of IWORK must be at least 6 + 6*N + 5*N*lg N. ( lg( N ) = smallest integer k such that 2^k >= N ) If ICOMPQ = 2, the dimension of IWORK must be at least 3 + 5*N. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; qstore_dim1 = *ldqs; qstore_offset = 1 + qstore_dim1 * 1; qstore -= qstore_offset; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 2) { *info = -1; } else if (*icompq == 1 && *qsiz < max(0,*n)) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ldq < max(1,*n)) { *info = -7; } else if (*ldqs < max(1,*n)) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED0", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); /* Determine the size and placement of the submatrices, and save in the leading elements of IWORK. */ iwork[1] = *n; subpbs = 1; tlvls = 0; L10: if (iwork[subpbs] > smlsiz) { for (j = subpbs; j >= 1; --j) { iwork[j * 2] = (iwork[j] + 1) / 2; iwork[(j << 1) - 1] = iwork[j] / 2; /* L20: */ } ++tlvls; subpbs <<= 1; goto L10; } i__1 = subpbs; for (j = 2; j <= i__1; ++j) { iwork[j] += iwork[j - 1]; /* L30: */ } /* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 using rank-1 modifications (cuts). 
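   A cut at position K writes T as diag(T1,T2) + ABS(E(K))*v*v', where v
   is zero except for entries of modulus one in positions K and K+1; the
   loop below subtracts ABS(E(K)) from D(K) and D(K+1) to form the
   decoupled blocks, and the merge phase (DLAED1 or DLAED7) accounts for
   the rank-one term.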
*/ spm1 = subpbs - 1; i__1 = spm1; for (i__ = 1; i__ <= i__1; ++i__) { submat = iwork[i__] + 1; smm1 = submat - 1; d__[smm1] -= (d__1 = e[smm1], abs(d__1)); d__[submat] -= (d__1 = e[smm1], abs(d__1)); /* L40: */ } indxq = (*n << 2) + 3; if (*icompq != 2) { /* Set up workspaces for eigenvalues only/accumulate new vectors routine */ temp = log((doublereal) (*n)) / log(2.); lgn = (integer) temp; if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } iprmpt = indxq + *n + 1; iperm = iprmpt + *n * lgn; iqptr = iperm + *n * lgn; igivpt = iqptr + *n + 2; igivcl = igivpt + *n * lgn; igivnm = 1; iq = igivnm + (*n << 1) * lgn; /* Computing 2nd power */ i__1 = *n; iwrem = iq + i__1 * i__1 + 1; /* Initialize pointers */ i__1 = subpbs; for (i__ = 0; i__ <= i__1; ++i__) { iwork[iprmpt + i__] = 1; iwork[igivpt + i__] = 1; /* L50: */ } iwork[iqptr] = 1; } /* Solve each submatrix eigenproblem at the bottom of the divide and conquer tree. */ curr = 0; i__1 = spm1; for (i__ = 0; i__ <= i__1; ++i__) { if (i__ == 0) { submat = 1; matsiz = iwork[1]; } else { submat = iwork[i__] + 1; matsiz = iwork[i__ + 1] - iwork[i__]; } if (*icompq == 2) { dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat + submat * q_dim1], ldq, &work[1], info); if (*info != 0) { goto L130; } } else { dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 + iwork[iqptr + curr]], &matsiz, &work[1], info); if (*info != 0) { goto L130; } if (*icompq == 1) { dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b15, &q[submat * q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]], &matsiz, &c_b29, &qstore[submat * qstore_dim1 + 1], ldqs); } /* Computing 2nd power */ i__2 = matsiz; iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2; ++curr; } k = 1; i__2 = iwork[i__ + 1]; for (j = submat; j <= i__2; ++j) { iwork[indxq + j] = k; ++k; /* L60: */ } /* L70: */ } /* Successively merge eigensystems of adjacent submatrices into eigensystem for the corresponding larger matrix. while ( SUBPBS > 1 ) */ curlvl = 1; L80: if (subpbs > 1) { spm2 = subpbs - 2; i__1 = spm2; for (i__ = 0; i__ <= i__1; i__ += 2) { if (i__ == 0) { submat = 1; matsiz = iwork[2]; msd2 = iwork[1]; curprb = 0; } else { submat = iwork[i__] + 1; matsiz = iwork[i__ + 2] - iwork[i__]; msd2 = matsiz / 2; ++curprb; } /* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) into an eigensystem of size MATSIZ. DLAED1 is used only for the full eigensystem of a tridiagonal matrix. DLAED7 handles the cases in which eigenvalues only or eigenvalues and eigenvectors of a full symmetric matrix (which was reduced to tridiagonal form) are desired. */ if (*icompq == 2) { dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1], ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], & msd2, &work[1], &iwork[subpbs + 1], info); } else { dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[ submat], &qstore[submat * qstore_dim1 + 1], ldqs, & iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, & work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm] , &iwork[igivpt], &iwork[igivcl], &work[igivnm], & work[iwrem], &iwork[subpbs + 1], info); } if (*info != 0) { goto L130; } iwork[i__ / 2 + 1] = iwork[i__ + 2]; /* L90: */ } subpbs /= 2; ++curlvl; goto L80; } /* end while Re-merge the eigenvalues/vectors which were deflated at the final merge step. 
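   At this point IWORK(INDXQ+1:INDXQ+N) holds the permutation that sorts
   the eigenvalues into ascending order; the code below applies it to D
   and, when eigenvectors were requested, gathers the matching columns
   from QSTORE (ICOMPQ = 1) or reorders the columns of Q in place via
   WORK (ICOMPQ = 2).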
*/ if (*icompq == 1) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1], &c__1); /* L100: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); } else if (*icompq == 2) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1); /* L110: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq); } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; /* L120: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); } goto L140; L130: *info = submat * (*n + 1) + submat + matsiz - 1; L140: return 0; /* End of DLAED0 */ } /* dlaed0_ */ /* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; /* Local variables */ static integer indx, i__, k, indxc; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer indxp; extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *), dlaed3_(integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, doublereal *, integer *); static integer n1, n2, idlmda, is, iw, iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer coltyp, iq2, zpp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED1 computes the updated eigensystem of a diagonal matrix after modification by a rank-one symmetric matrix. This routine is used only for the eigenproblem which requires all eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles the case in which eigenvalues only or eigenvalues and eigenvectors of a full symmetric matrix (which was reduced to tridiagonal form) are desired. T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) where Z = Q'u, u is a vector of length N with ones in the CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. The eigenvectors of the original matrix are stored in Q, and the eigenvalues are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple eigenvalues or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLAED2. The second stage consists of calculating the updated eigenvalues. This is done by finding the roots of the secular equation via the routine DLAED4 (as called by DLAED3). This routine also calculates the eigenvectors of the current problem. The final stage consists of computing the updated eigenvectors directly using the updated eigenvalues. The eigenvectors for the current problem are multiplied with the eigenvectors from the overall problem. Arguments ========= N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. 
D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the rank-1-perturbed matrix. On exit, the eigenvalues of the repaired matrix. Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, the eigenvectors of the rank-1-perturbed matrix. On exit, the eigenvectors of the repaired tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input/output) INTEGER array, dimension (N) On entry, the permutation which separately sorts the two subproblems in D into ascending order. On exit, the permutation which will reintegrate the subproblems back into sorted order, i.e. D( INDXQ( I = 1, N ) ) will be in ascending order. RHO (input) DOUBLE PRECISION The subdiagonal entry used to create the rank-1 modification. CUTPNT (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N/2. WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2) IWORK (workspace) INTEGER array, dimension (4*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --work; --iwork; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*ldq < max(1,*n)) { *info = -4; } else /* if(complicated condition) */ { /* Computing MIN */ i__1 = 1, i__2 = *n / 2; if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) { *info = -7; } } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED1", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* The following values are integer pointers which indicate the portion of the workspace used by a particular array in DLAED2 and DLAED3. */ iz = 1; idlmda = iz + *n; iw = idlmda + *n; iq2 = iw + *n; indx = 1; indxc = indx + *n; coltyp = indxc + *n; indxp = coltyp + *n; /* Form the z-vector which consists of the last row of Q_1 and the first row of Q_2. */ dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1); zpp1 = *cutpnt + 1; i__1 = *n - *cutpnt; dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1); /* Deflate eigenvalues. */ dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[ iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[ indxc], &iwork[indxp], &iwork[coltyp], info); if (*info != 0) { goto L20; } /* Solve Secular Equation. */ if (k != 0) { is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp + 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2; dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda], &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[ is], info); if (*info != 0) { goto L20; } /* Prepare the INDXQ sorting permutation. 
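   D(1:K) now holds the secular-equation roots and D(K+1:N) the deflated
   eigenvalues, each run already sorted; DLAMRG merges the two runs into
   a single ascending permutation in INDXQ (its stride arguments, here
   1 and -1, tell it the direction in which each run is ordered).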
*/ n1 = k; n2 = *n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indxq[i__] = i__; /* L10: */ } } L20: return 0; /* End of DLAED1 */ } /* dlaed1_ */ /* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal * d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2, integer *indx, integer *indxc, integer *indxp, integer *coltyp, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer imax, jmax; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer ctot[4]; static doublereal c__; static integer i__, j; static doublereal s, t; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer k2, n2; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ct, nj; static integer pj, js; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer iq1, iq2, n1p1; static doublereal eps, tau, tol; static integer psm[4]; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED2 merges the two sets of eigenvalues together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more eigenvalues are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. Arguments ========= K (output) INTEGER The number of non-deflated eigenvalues, and the order of the related secular equation. 0 <= K <=N. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. N1 (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= N1 <= N/2. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, D contains the eigenvalues of the two submatrices to be combined. On exit, D contains the trailing (N-K) updated eigenvalues (those which were deflated) sorted into increasing order. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, Q contains the eigenvectors of two submatrices in the two square blocks with corners at (1,1), (N1,N1) and (N1+1, N1+1), (N,N). On exit, Q contains the trailing (N-K) updated eigenvectors (those which were deflated) in its last N-K columns. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input/output) INTEGER array, dimension (N) The permutation which separately sorts the two sub-problems in D into ascending order. Note that elements in the second half of this permutation must first have N1 added to their values. Destroyed on exit. RHO (input/output) DOUBLE PRECISION On entry, the off-diagonal element associated with the rank-1 cut which originally split the two submatrices which are now being recombined. On exit, RHO has been modified to the value required by DLAED3. 
Z (input) DOUBLE PRECISION array, dimension (N) On entry, Z contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). On exit, the contents of Z have been destroyed by the updating process. DLAMDA (output) DOUBLE PRECISION array, dimension (N) A copy of the first K eigenvalues which will be used by DLAED3 to form the secular equation. W (output) DOUBLE PRECISION array, dimension (N) The first k values of the final deflation-altered z-vector which will be passed to DLAED3. Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2) A copy of the first K eigenvectors which will be used by DLAED3 in a matrix multiply (DGEMM) to solve for the new eigenvectors. INDX (workspace) INTEGER array, dimension (N) The permutation used to sort the contents of DLAMDA into ascending order. INDXC (output) INTEGER array, dimension (N) The permutation used to arrange the columns of the deflated Q matrix into three groups: the first group contains non-zero elements only at and above N1, the second contains non-zero elements only below N1, and the third is dense. INDXP (workspace) INTEGER array, dimension (N) The permutation used to place deflated values of D at the end of the array. INDXP(1:K) points to the nondeflated D-values and INDXP(K+1:N) points to the deflated eigenvalues. COLTYP (workspace/output) INTEGER array, dimension (N) During execution, a label which will indicate which of the following types a column in the Q2 matrix is: 1 : non-zero in the upper half only; 2 : dense; 3 : non-zero in the lower half only; 4 : deflated. On exit, COLTYP(i) is the number of columns of type i, for i=1 to 4 only. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --z__; --dlamda; --w; --q2; --indx; --indxc; --indxp; --coltyp; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; } else if (*ldq < max(1,*n)) { *info = -6; } else /* if(complicated condition) */ { /* Computing MIN */ i__1 = 1, i__2 = *n / 2; if (min(i__1,i__2) > *n1 || *n / 2 < *n1) { *info = -3; } } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED2", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n2 = *n - *n1; n1p1 = *n1 + 1; if (*rho < 0.) { dscal_(&n2, &c_b151, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1. Since z is the concatenation of two normalized vectors, norm2(z) = sqrt(2). */ t = 1. 
/ sqrt(2.); dscal_(n, &t, &z__[1], &c__1); /* RHO = ABS( norm(z)**2 * RHO ) */ *rho = (d__1 = *rho * 2., abs(d__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = n1p1; i__ <= i__1; ++i__) { indxq[i__] += *n1; /* L10: */ } /* re-integrate the deflated parts from the last pass */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; /* L20: */ } dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indx[i__] = indxq[indxc[i__]]; /* L30: */ } /* Calculate the allowable deflation tolerance */ imax = idamax_(n, &z__[1], &c__1); jmax = idamax_(n, &d__[1], &c__1); eps = EPSILON; /* Computing MAX */ d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2)) ; tol = eps * 8. * max(d__3,d__4); /* If the rank-1 modifier is small enough, no more needs to be done except to reorganize Q so that its columns correspond with the elements in D. */ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { *k = 0; iq2 = 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { i__ = indx[j]; dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1); dlamda[j] = d__[i__]; iq2 += *n; /* L40: */ } dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq); dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1); goto L190; } /* If there are multiple eigenvalues then the problem deflates. Here the number of equal eigenvalues are found. As each equal eigenvalue is found, an elementary reflector is computed to rotate the corresponding eigensubspace so that the corresponding components of Z are zero in this new basis. */ i__1 = *n1; for (i__ = 1; i__ <= i__1; ++i__) { coltyp[i__] = 1; /* L50: */ } i__1 = *n; for (i__ = n1p1; i__ <= i__1; ++i__) { coltyp[i__] = 3; /* L60: */ } *k = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { nj = indx[j]; if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; coltyp[nj] = 4; indxp[k2] = nj; if (j == *n) { goto L100; } } else { pj = nj; goto L80; } /* L70: */ } L80: ++j; nj = indx[j]; if (j > *n) { goto L100; } if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; coltyp[nj] = 4; indxp[k2] = nj; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[pj]; c__ = z__[nj]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); t = d__[nj] - d__[pj]; c__ /= tau; s = -s / tau; if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { /* Deflation is possible. */ z__[nj] = tau; z__[pj] = 0.; if (coltyp[nj] != coltyp[pj]) { coltyp[nj] = 2; } coltyp[pj] = 4; drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, & c__, &s); /* Computing 2nd power */ d__1 = c__; /* Computing 2nd power */ d__2 = s; t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); /* Computing 2nd power */ d__1 = s; /* Computing 2nd power */ d__2 = c__; d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); d__[pj] = t; --k2; i__ = 1; L90: if (k2 + i__ <= *n) { if (d__[pj] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = pj; ++i__; goto L90; } else { indxp[k2 + i__ - 1] = pj; } } else { indxp[k2 + i__ - 1] = pj; } pj = nj; } else { ++(*k); dlamda[*k] = d__[pj]; w[*k] = z__[pj]; indxp[*k] = pj; pj = nj; } } goto L80; L100: /* Record the last eigenvalue. 
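       (The scan above always leaves one candidate pending in PJ, so it
       is added to the nondeflated set here.)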
*/ ++(*k); dlamda[*k] = d__[pj]; w[*k] = z__[pj]; indxp[*k] = pj; /* Count up the total number of the various types of columns, then form a permutation which positions the four column types into four uniform groups (although one or more of these groups may be empty). */ for (j = 1; j <= 4; ++j) { ctot[j - 1] = 0; /* L110: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { ct = coltyp[j]; ++ctot[ct - 1]; /* L120: */ } /* PSM(*) = Position in SubMatrix (of types 1 through 4) */ psm[0] = 1; psm[1] = ctot[0] + 1; psm[2] = psm[1] + ctot[1]; psm[3] = psm[2] + ctot[2]; *k = *n - ctot[3]; /* Fill out the INDXC array so that the permutation which it induces will place all type-1 columns first, all type-2 columns next, then all type-3's, and finally all type-4's. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { js = indxp[j]; ct = coltyp[js]; indx[psm[ct - 1]] = js; indxc[psm[ct - 1]] = j; ++psm[ct - 1]; /* L130: */ } /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA and Q2 respectively. The eigenvalues/vectors which were not deflated go into the first K slots of DLAMDA and Q2 respectively, while those which were deflated go into the last N - K slots. */ i__ = 1; iq1 = 1; iq2 = (ctot[0] + ctot[1]) * *n1 + 1; i__1 = ctot[0]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); z__[i__] = d__[js]; ++i__; iq1 += *n1; /* L140: */ } i__1 = ctot[1]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); z__[i__] = d__[js]; ++i__; iq1 += *n1; iq2 += n2; /* L150: */ } i__1 = ctot[2]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); z__[i__] = d__[js]; ++i__; iq2 += n2; /* L160: */ } iq1 = iq2; i__1 = ctot[3]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1); iq2 += *n; z__[i__] = d__[js]; ++i__; /* L170: */ } /* The deflated eigenvalues and their corresponding vectors go back into the last N - K slots of D and Q respectively. */ dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq); i__1 = *n - *k; dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1); /* Copy CTOT into COLTYP for referencing in DLAED3. 
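       (On exit COLTYP(1:4) holds the counts of the four column types,
       upper-half-only, dense, lower-half-only and deflated; DLAED3 uses
       these counts to split its back-multiplication into two smaller
       GEMMs.)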
*/ for (j = 1; j <= 4; ++j) { coltyp[j] = ctot[j - 1]; /* L180: */ } L190: return 0; /* End of DLAED2 */ } /* dlaed2_ */ /* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal * d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda, doublereal *q2, integer *indx, integer *ctot, doublereal *w, doublereal *s, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dlaed4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer n2; extern doublereal dlamc3_(doublereal *, doublereal *); static integer n12, ii, n23; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer iq2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED3 finds the roots of the secular equation, as defined by the values in D, W, and RHO, between 1 and K. It makes the appropriate calls to DLAED4 and then updates the eigenvectors by multiplying the matrix of eigenvectors of the pair of eigensystems being combined by the matrix of eigenvectors of the K-by-K system which is solved here. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= K (input) INTEGER The number of terms in the rational function to be solved by DLAED4. K >= 0. N (input) INTEGER The number of rows and columns in the Q matrix. N >= K (deflation may result in N>K). N1 (input) INTEGER The location of the last eigenvalue in the leading submatrix. min(1,N) <= N1 <= N/2. D (output) DOUBLE PRECISION array, dimension (N) D(I) contains the updated eigenvalues for 1 <= I <= K. Q (output) DOUBLE PRECISION array, dimension (LDQ,N) Initially the first K columns are used as workspace. On output the columns 1 to K contain the updated eigenvectors. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). RHO (input) DOUBLE PRECISION The value of the parameter in the rank one update equation. RHO >= 0 required. DLAMDA (input/output) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. May be changed on output by having lowest order bit set to zero on Cray X-MP, Cray Y-MP, Cray-2, or Cray C-90, as described above. Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N) The first K columns of this matrix contain the non-deflated eigenvectors for the split problem. 
INDX (input) INTEGER array, dimension (N) The permutation used to arrange the columns of the deflated Q matrix into three groups (see DLAED2). The rows of the eigenvectors found by DLAED4 must be likewise permuted before the matrix multiply can take place. CTOT (input) INTEGER array, dimension (4) A count of the total number of the various types of columns in Q, as described in INDX. The fourth column type is any column which has been deflated. W (input/output) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating vector. Destroyed on output. S (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K Will contain the eigenvectors of the repaired matrix which will be multiplied by the previously accumulated eigenvectors to update the system. LDS (input) INTEGER The leading dimension of S. LDS >= max(1,K). INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dlamda; --q2; --indx; --ctot; --w; --s; /* Function Body */ *info = 0; if (*k < 0) { *info = -1; } else if (*n < *k) { *info = -2; } else if (*ldq < max(1,*n)) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED3", &i__1); return 0; } /* Quick return if possible */ if (*k == 0) { return 0; } /* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), which on any of these machines zeros out the bottommost bit of DLAMDA(I) if it is 1; this makes the subsequent subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DLAMDA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DLAMDA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DLAMBDA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; /* L10: */ } i__1 = *k; for (j = 1; j <= i__1; ++j) { dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { goto L120; } /* L20: */ } if (*k == 1) { goto L110; } if (*k == 2) { i__1 = *k; for (j = 1; j <= i__1; ++j) { w[1] = q[j * q_dim1 + 1]; w[2] = q[j * q_dim1 + 2]; ii = indx[1]; q[j * q_dim1 + 1] = w[ii]; ii = indx[2]; q[j * q_dim1 + 2] = w[ii]; /* L30: */ } goto L110; } /* Compute updated W. 
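       This is Lowner's formula: column J of Q still holds the DELTA
       vector from DLAED4, i.e. Q(I,J) = DLAMDA(I) - LAMBDA(J), so
       W(I) = Q(I,I) * prod over J.NE.I of Q(I,J)/(DLAMDA(I) - DLAMDA(J))
       equals minus the square of the updated z-component; SQRT(-W(I))
       with the original sign (saved in S first) recovers a z-vector
       exactly consistent with the computed roots, which helps keep the
       eigenvectors formed below numerically orthogonal.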
*/ dcopy_(k, &w[1], &c__1, &s[1], &c__1); /* Initialize W(I) = Q(I,I) */ i__1 = *ldq + 1; dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L40: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L50: */ } /* L60: */ } i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__1 = sqrt(-w[i__]); w[i__] = d_sign(&d__1, &s[i__]); /* L70: */ } /* Compute eigenvectors of the modified rank-1 modification. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { s[i__] = w[i__] / q[i__ + j * q_dim1]; /* L80: */ } temp = dnrm2_(k, &s[1], &c__1); i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { ii = indx[i__]; q[i__ + j * q_dim1] = s[ii] / temp; /* L90: */ } /* L100: */ } /* Compute the updated eigenvectors. */ L110: n2 = *n - *n1; n12 = ctot[1] + ctot[2]; n23 = ctot[2] + ctot[3]; dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23); iq2 = *n1 * n12 + 1; if (n23 != 0) { dgemm_("N", "N", &n2, k, &n23, &c_b15, &q2[iq2], &n2, &s[1], &n23, & c_b29, &q[*n1 + 1 + q_dim1], ldq); } else { dlaset_("A", &n2, k, &c_b29, &c_b29, &q[*n1 + 1 + q_dim1], ldq); } dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12); if (n12 != 0) { dgemm_("N", "N", n1, k, &n12, &c_b15, &q2[1], n1, &s[1], &n12, &c_b29, &q[q_offset], ldq); } else { dlaset_("A", n1, k, &c_b29, &c_b29, &q[q_dim1 + 1], ldq); } L120: return 0; /* End of DLAED3 */ } /* dlaed3_ */ /* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam, integer *info) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal dphi, dpsi; static integer iter; static doublereal temp, prew, temp1, a, b, c__; static integer j; static doublereal w, dltlb, dltub, midpt; static integer niter; static logical swtch; extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaed6_(integer *, logical *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static logical swtch3; static integer ii; static doublereal dw, zz[3]; static logical orgati; static doublereal erretm, rhoinv; static integer ip1; static doublereal del, eta, phi, eps, tau, psi; static integer iim1, iip1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the I-th updated eigenvalue of a symmetric rank-one modification to a diagonal matrix whose elements are given in the array d, and that D(i) < D(j) for i < j and that RHO > 0. This is arranged by the calling routine, and is no loss in generality. The rank-one modified system is thus diag( D ) + RHO * Z * Z_transpose. where we assume the Euclidean norm of Z is 1. The method consists of approximating the rational functions in the secular equation by simpler interpolating rational functions. Arguments ========= N (input) INTEGER The length of all arrays. I (input) INTEGER The index of the eigenvalue to be computed. 1 <= I <= N. D (input) DOUBLE PRECISION array, dimension (N) The original eigenvalues. It is assumed that they are in order, D(I) < D(J) for I < J. Z (input) DOUBLE PRECISION array, dimension (N) The components of the updating vector. 
DELTA (output) DOUBLE PRECISION array, dimension (N) If N .GT. 2, DELTA contains (D(j) - lambda_I) in its j-th component. If N = 1, then DELTA(1) = 1. If N = 2, see DLAED5 for detail. The vector DELTA contains the information necessary to construct the eigenvectors by DLAED3 and DLAED9. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DLAM (output) DOUBLE PRECISION The computed lambda_I, the I-th updated eigenvalue. INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, the updating process failed. Internal Parameters =================== Logical variable ORGATI (origin-at-i?) is used for distinguishing whether D(i) or D(i+1) is treated as the origin. ORGATI = .true. origin at i ORGATI = .false. origin at i+1 Logical variable SWTCH3 (switch-for-3-poles?) is for noting if we are working with THREE poles! MAXIT is the maximum number of iterations allowed for each eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== Since this routine is called in an inner loop, we do no argument checking. Quick return for N=1 and 2. */ /* Parameter adjustments */ --delta; --z__; --d__; /* Function Body */ *info = 0; if (*n == 1) { /* Presumably, I=1 upon entry */ *dlam = d__[1] + *rho * z__[1] * z__[1]; delta[1] = 1.; return 0; } if (*n == 2) { dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam); return 0; } /* Compute machine epsilon */ eps = EPSILON; rhoinv = 1. / *rho; /* The case I = N */ if (*i__ == *n) { /* Initialize some basic variables */ ii = *n - 1; niter = 1; /* Calculate initial guess */ midpt = *rho / 2.; /* If ||Z||_2 is not one, then TEMP should be set to RHO * ||Z||_2^2 / TWO */ i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - midpt; /* L10: */ } psi = 0.; i__1 = *n - 2; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / delta[j]; /* L20: */ } c__ = rhoinv + psi; w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[* n]; if (w <= 0.) { temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho) + z__[*n] * z__[*n] / *rho; if (c__ <= temp) { tau = *rho; } else { del = d__[*n] - d__[*n - 1]; a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n] ; b = z__[*n] * z__[*n] * del; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } } /* It can be proved that D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO */ dltlb = midpt; dltub = *rho; } else { del = d__[*n] - d__[*n - 1]; a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; b = z__[*n] * z__[*n] * del; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } /* It can be proved that D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2 */ dltlb = 0.; dltub = midpt; } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - tau; /* L30: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L40: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Test for convergence */ if (abs(w) <= eps * erretm) { *dlam = d__[*i__] + tau; goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ ++niter; c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * ( dpsi + dphi); b = delta[*n - 1] * delta[*n] * w; if (c__ < 0.) { c__ = abs(c__); } if (c__ == 0.) { /* ETA = B/A ETA = RHO - TAU */ eta = dltub - tau; } else if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L50: */ } tau += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L60: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 30; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { *dlam = d__[*i__] + tau; goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (dpsi + dphi); b = delta[*n - 1] * delta[*n] * w; if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L70: */ } tau += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L80: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * ( dpsi + dphi); w = rhoinv + phi + psi; /* L90: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; *dlam = d__[*i__] + tau; goto L250; /* End for the case I = N */ } else { /* The case for I < N */ niter = 1; ip1 = *i__ + 1; /* Calculate initial guess */ del = d__[ip1] - d__[*i__]; midpt = del / 2.; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - midpt; /* L100: */ } psi = 0.; i__1 = *i__ - 1; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / delta[j]; /* L110: */ } phi = 0.; i__1 = *i__ + 2; for (j = *n; j >= i__1; --j) { phi += z__[j] * z__[j] / delta[j]; /* L120: */ } c__ = rhoinv + psi + phi; w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] / delta[ip1]; if (w > 0.) { /* d(i)< the ith eigenvalue < (d(i)+d(i+1))/2 We choose d(i) as origin. */ orgati = TRUE_; a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; b = z__[*i__] * z__[*i__] * del; if (a > 0.) { tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } else { tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } dltlb = 0.; dltub = midpt; } else { /* (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1) We choose d(i+1) as origin. */ orgati = FALSE_; a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; b = z__[ip1] * z__[ip1] * del; if (a < 0.) { tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( d__1)))); } else { tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / (c__ * 2.); } dltlb = -midpt; dltub = 0.; } if (orgati) { i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - tau; /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[ip1] - tau; /* L140: */ } } if (orgati) { ii = *i__; } else { ii = *i__ + 1; } iim1 = ii - 1; iip1 = ii + 1; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L150: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L160: */ } w = rhoinv + phi + psi; /* W is the value of the secular function with its ii-th element removed. */ swtch3 = FALSE_; if (orgati) { if (w < 0.) { swtch3 = TRUE_; } } else { if (w > 0.) { swtch3 = TRUE_; } } if (ii == 1 || ii == *n) { swtch3 = FALSE_; } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w += temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; /* Test for convergence */ if (abs(w) <= eps * erretm) { if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ ++niter; if (! swtch3) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / delta[*i__]; c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / delta[ip1]; c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 * d__1); } a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] * dw; b = delta[*i__] * delta[ip1] * w; if (c__ == 0.) { if (a == 0.) 
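/* The step ETA is a root of the interpolation quadratic
       C*ETA**2 - A*ETA + B = 0.  C = 0 reduces this to ETA = B/A;
       if A vanishes as well, A is recomputed below from an
       equivalent expression so that the quotient stays well
       defined. */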
{ if (orgati) { a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] * (dpsi + dphi); } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } } else { /* Interpolation using THREE most relevant poles */ temp = rhoinv + psi + phi; if (orgati) { temp1 = z__[iim1] / delta[iim1]; temp1 *= temp1; c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[ iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi); } else { temp1 = z__[iip1] / delta[iip1]; temp1 *= temp1; c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[ iim1]) * temp1; zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1)); zz[2] = z__[iip1] * z__[iip1]; } zz[1] = z__[ii] * z__[ii]; dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info); if (*info != 0) { goto L250; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } prew = w; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L180: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L190: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L200: */ } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + ( d__1 = tau + eta, abs(d__1)) * dw; swtch = FALSE_; if (orgati) { if (-w > abs(prew) / 10.) { swtch = TRUE_; } } else { if (w > abs(prew) / 10.) { swtch = TRUE_; } } tau += eta; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 30; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ if (! swtch3) { if (! swtch) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / delta[*i__]; c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * ( d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / delta[ip1]; c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 * d__1); } } else { temp = z__[ii] / delta[ii]; if (orgati) { dpsi += temp * temp; } else { dphi += temp * temp; } c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi; } a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] * dw; b = delta[*i__] * delta[ip1] * w; if (c__ == 0.) { if (a == 0.) { if (! 
swtch) { if (orgati) { a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + delta[*i__] * delta[ *i__] * (dpsi + dphi); } } else { a = delta[*i__] * delta[*i__] * dpsi + delta[ip1] * delta[ip1] * dphi; } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))); } } else { /* Interpolation using THREE most relevant poles */ temp = rhoinv + psi + phi; if (swtch) { c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi; zz[0] = delta[iim1] * delta[iim1] * dpsi; zz[2] = delta[iip1] * delta[iip1] * dphi; } else { if (orgati) { temp1 = z__[iim1] / delta[iim1]; temp1 *= temp1; c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi); } else { temp1 = z__[iip1] / delta[iip1]; temp1 *= temp1; c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * temp1; zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1)); zz[2] = z__[iip1] * z__[iip1]; } } dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info); if (*info != 0) { goto L250; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L210: */ } tau += eta; prew = w; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L220: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L230: */ } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w * prew > 0. && abs(w) > abs(prew) / 10.) { swtch = ! swtch; } /* L240: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } } L250: return 0; /* End of DLAED4 */ } /* dlaed4_ */ /* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal temp, b, c__, w, del, tau; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the I-th eigenvalue of a symmetric rank-one modification of a 2-by-2 diagonal matrix diag( D ) + RHO * Z * transpose(Z) . The diagonal elements in the array D are assumed to satisfy D(i) < D(j) for i < j . We also assume RHO > 0 and that the Euclidean norm of the vector Z is one. Arguments ========= I (input) INTEGER The index of the eigenvalue to be computed. I = 1 or I = 2. 
D (input) DOUBLE PRECISION array, dimension (2) The original eigenvalues. We assume D(1) < D(2). Z (input) DOUBLE PRECISION array, dimension (2) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension (2) The vector DELTA contains the information necessary to construct the eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DLAM (output) DOUBLE PRECISION The computed lambda_I, the I-th updated eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== */ /* Parameter adjustments */ --delta; --z__; --d__; /* Function Body */ del = d__[2] - d__[1]; if (*i__ == 1) { w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.; if (w > 0.) { b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[1] * z__[1] * del; /* B > ZERO, always */ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); *dlam = d__[1] + tau; delta[1] = -z__[1] / tau; delta[2] = z__[2] / (del - tau); } else { b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * del; if (b > 0.) { tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); } else { tau = (b - sqrt(b * b + c__ * 4.)) / 2.; } *dlam = d__[2] + tau; delta[1] = -z__[1] / (del + tau); delta[2] = -z__[2] / tau; } temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); delta[1] /= temp; delta[2] /= temp; } else { /* Now I=2 */ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * del; if (b > 0.) { tau = (b + sqrt(b * b + c__ * 4.)) / 2.; } else { tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); } *dlam = d__[2] + tau; delta[1] = -z__[1] / (del + tau); delta[2] = -z__[2] / tau; temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); delta[1] /= temp; delta[2] /= temp; } return 0; /* End OF DLAED5 */ } /* dlaed5_ */ /* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal * rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * tau, integer *info) { /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); /* Local variables */ static doublereal base; static integer iter; static doublereal temp, temp1, temp2, temp3, temp4, a, b, c__, f; static integer i__; static logical scale; static integer niter; static doublereal small1, small2, fc, df, sminv1, sminv2; static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv, ddf, lbd, eta, ubd, eps; /* -- LAPACK routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. February 2007 Purpose ======= DLAED6 computes the positive or negative root (closest to the origin) of z(1) z(2) z(3) f(x) = rho + --------- + ---------- + --------- d(1)-x d(2)-x d(3)-x It is assumed that if ORGATI = .true. the root is between d(2) and d(3); otherwise it is between d(1) and d(2) This routine will be called by DLAED4 when necessary. In most cases, the root sought is the smallest in magnitude, though it might not be in some extremely rare situations. Arguments ========= KNITER (input) INTEGER Refer to DLAED4 for its significance. ORGATI (input) LOGICAL If ORGATI is true, the needed root is between d(2) and d(3); otherwise it is between d(1) and d(2). See DLAED4 for further details. RHO (input) DOUBLE PRECISION Refer to the equation f(x) above. 
D (input) DOUBLE PRECISION array, dimension (3) D satisfies d(1) < d(2) < d(3). Z (input) DOUBLE PRECISION array, dimension (3) Each of the elements in z must be positive. FINIT (input) DOUBLE PRECISION The value of f at 0. It is more accurate than the one evaluated inside this routine (if someone wants to do so). TAU (output) DOUBLE PRECISION The root of the equation f(x). INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, failure to converge Further Details =============== 30/06/99: Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA 10/02/03: This version has a few statements commented out for thread safety (machine parameters are computed on each entry). SJH. 05/10/06: Modified from a new version of Ren-Cang Li, use Gragg-Thornton-Warner cubic convergent scheme for better stability. ===================================================================== */ /* Parameter adjustments */ --z__; --d__; /* Function Body */ *info = 0; if (*orgati) { lbd = d__[2]; ubd = d__[3]; } else { lbd = d__[1]; ubd = d__[2]; } if (*finit < 0.) { lbd = 0.; } else { ubd = 0.; } niter = 1; *tau = 0.; if (*kniter == 2) { if (*orgati) { temp = (d__[3] - d__[2]) / 2.; c__ = *rho + z__[1] / (d__[1] - d__[2] - temp); a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3]; b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2]; } else { temp = (d__[1] - d__[2]) / 2.; c__ = *rho + z__[3] / (d__[3] - d__[2] - temp); a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2]; b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1]; } /* Computing MAX */ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); temp = max(d__1,d__2); a /= temp; b /= temp; c__ /= temp; if (c__ == 0.) { *tau = b / a; } else if (a <= 0.) { *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)) )); } if (*tau < lbd || *tau > ubd) { *tau = (lbd + ubd) / 2.; } if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) { *tau = 0.; } else { temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / ( d__[3] * (d__[3] - *tau)); if (temp <= 0.) { lbd = *tau; } else { ubd = *tau; } if (abs(*finit) <= abs(temp)) { *tau = 0.; } } } /* get machine parameters for possible scaling to avoid overflow modified by Sven: parameters SMALL1, SMINV1, SMALL2, SMINV2, EPS are not SAVEd anymore between one call to the others but recomputed at each call */ eps = EPSILON; base = BASE; i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.); small1 = pow_di(&base, &i__1); sminv1 = 1. 
/ small1; small2 = small1 * small1; sminv2 = sminv1 * sminv1; /* Determine if scaling of inputs necessary to avoid overflow when computing 1/TEMP**3 */ if (*orgati) { /* Computing MIN */ d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - * tau, abs(d__2)); temp = min(d__3,d__4); } else { /* Computing MIN */ d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - * tau, abs(d__2)); temp = min(d__3,d__4); } scale = FALSE_; if (temp <= small1) { scale = TRUE_; if (temp <= small2) { /* Scale up by power of radix nearest 1/SAFMIN**(2/3) */ sclfac = sminv2; sclinv = small2; } else { /* Scale up by power of radix nearest 1/SAFMIN**(1/3) */ sclfac = sminv1; sclinv = small1; } /* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */ for (i__ = 1; i__ <= 3; ++i__) { dscale[i__ - 1] = d__[i__] * sclfac; zscale[i__ - 1] = z__[i__] * sclfac; /* L10: */ } *tau *= sclfac; lbd *= sclfac; ubd *= sclfac; } else { /* Copy D and Z to DSCALE and ZSCALE */ for (i__ = 1; i__ <= 3; ++i__) { dscale[i__ - 1] = d__[i__]; zscale[i__ - 1] = z__[i__]; /* L20: */ } } fc = 0.; df = 0.; ddf = 0.; for (i__ = 1; i__ <= 3; ++i__) { temp = 1. / (dscale[i__ - 1] - *tau); temp1 = zscale[i__ - 1] * temp; temp2 = temp1 * temp; temp3 = temp2 * temp; fc += temp1 / dscale[i__ - 1]; df += temp2; ddf += temp3; /* L30: */ } f = *finit + *tau * fc; if (abs(f) <= 0.) { goto L60; } if (f <= 0.) { lbd = *tau; } else { ubd = *tau; } /* Iteration begins -- Use Gragg-Thornton-Warner cubic convergent scheme It is not hard to see that 1) Iterations will go up monotonically if FINIT < 0; 2) Iterations will go down monotonically if FINIT > 0. */ iter = niter + 1; for (niter = iter; niter <= 40; ++niter) { if (*orgati) { temp1 = dscale[1] - *tau; temp2 = dscale[2] - *tau; } else { temp1 = dscale[0] - *tau; temp2 = dscale[1] - *tau; } a = (temp1 + temp2) * f - temp1 * temp2 * df; b = temp1 * temp2 * f; c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf; /* Computing MAX */ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); temp = max(d__1,d__2); a /= temp; b /= temp; c__ /= temp; if (c__ == 0.) { eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } if (f * eta >= 0.) { eta = -f / df; } *tau += eta; if (*tau < lbd || *tau > ubd) { *tau = (lbd + ubd) / 2.; } fc = 0.; erretm = 0.; df = 0.; ddf = 0.; for (i__ = 1; i__ <= 3; ++i__) { temp = 1. / (dscale[i__ - 1] - *tau); temp1 = zscale[i__ - 1] * temp; temp2 = temp1 * temp; temp3 = temp2 * temp; temp4 = temp1 / dscale[i__ - 1]; fc += temp4; erretm += abs(temp4); df += temp2; ddf += temp3; /* L40: */ } f = *finit + *tau * fc; erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df; if (abs(f) <= eps * erretm) { goto L60; } if (f <= 0.) 
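/* F is strictly increasing between the poles (the Z(I) are
       positive), so F <= 0 means TAU still lies to the left of the
       root; tighten the corresponding bound to keep the root
       bracketed by [LBD, UBD]. */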
{ lbd = *tau; } else { ubd = *tau; } /* L50: */ } *info = 1; L60: /* Undo scaling */ if (scale) { *tau *= sclinv; } return 0; /* End of DLAED6 */ } /* dlaed6_ */ /* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz, integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer * perm, integer *givptr, integer *givcol, doublereal *givnum, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static integer indx, curr, i__, k; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer indxc, indxp, n1, n2; extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *), dlaed9_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *), dlaeda_(integer *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *) ; static integer idlmda, is, iw, iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer coltyp, iq2, ptr, ldq2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED7 computes the updated eigensystem of a diagonal matrix after modification by a rank-one symmetric matrix. This routine is used only for the eigenproblem which requires all eigenvalues and optionally eigenvectors of a dense symmetric matrix that has been reduced to tridiagonal form. DLAED1 handles the case in which all eigenvalues and eigenvectors of a symmetric tridiagonal matrix are desired. T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) where Z = Q'u, u is a vector of length N with ones in the CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. The eigenvectors of the original matrix are stored in Q, and the eigenvalues are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple eigenvalues or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLAED8. The second stage consists of calculating the updated eigenvalues. This is done by finding the roots of the secular equation via the routine DLAED4 (as called by DLAED9). This routine also calculates the eigenvectors of the current problem. The final stage consists of computing the updated eigenvectors directly using the updated eigenvalues. The eigenvectors for the current problem are multiplied with the eigenvectors from the overall problem. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. 
On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. TLVLS (input) INTEGER The total number of merging levels in the overall divide and conquer tree. CURLVL (input) INTEGER The current level in the overall merge routine, 0 <= CURLVL <= TLVLS. CURPBM (input) INTEGER The current problem in the current level in the overall merge routine (counting from upper left to lower right). D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the rank-1-perturbed matrix. On exit, the eigenvalues of the repaired matrix. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, the eigenvectors of the rank-1-perturbed matrix. On exit, the eigenvectors of the repaired tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (output) INTEGER array, dimension (N) The permutation which will reintegrate the subproblem just solved back into sorted order, i.e., D( INDXQ( I = 1, N ) ) will be in ascending order. RHO (input) DOUBLE PRECISION The subdiagonal element used to create the rank-1 modification. CUTPNT (input) INTEGER Contains the location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N. QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) Stores eigenvectors of submatrices encountered during divide and conquer, packed together. QPTR points to beginning of the submatrices. QPTR (input/output) INTEGER array, dimension (N+2) List of indices pointing to beginning of submatrices stored in QSTORE. The submatrices are numbered starting at the bottom left of the divide and conquer tree, from left to right and bottom to top. PRMPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in PERM a level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) indicates the size of the permutation and also the size of the full, non-deflated problem. PERM (input) INTEGER array, dimension (N lg N) Contains the permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in GIVCOL a level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) indicates the number of Givens rotations. GIVCOL (input) INTEGER array, dimension (2, N lg N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) Each number indicates the S value to be used in the corresponding Givens rotation. WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N) IWORK (workspace) INTEGER array, dimension (4*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
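    Illustration
    ============

    A minimal calling sketch (not part of the original LAPACK text;
    all names in it are placeholders, with the workspace sizes taken
    from the WORK and IWORK descriptions above):

       doublereal work[3*N + QSIZ*N];
       integer iwork[4*N], info;
       dlaed7_(&icompq, &n, &qsiz, &tlvls, &curlvl, &curpbm,
               d, q, &ldq, indxq, &rho, &cutpnt, qstore, qptr,
               prmptr, perm, givptr, givcol, givnum,
               work, iwork, &info);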
*/ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --qstore; --qptr; --prmptr; --perm; --givptr; givcol -= 3; givnum -= 3; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*icompq == 1 && *qsiz < *n) { *info = -4; } else if (*ldq < max(1,*n)) { *info = -9; } else if (min(1,*n) > *cutpnt || *n < *cutpnt) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED7", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLAED8 and DLAED9. */ if (*icompq == 1) { ldq2 = *qsiz; } else { ldq2 = *n; } iz = 1; idlmda = iz + *n; iw = idlmda + *n; iq2 = iw + *n; is = iq2 + *n * ldq2; indx = 1; indxc = indx + *n; coltyp = indxc + *n; indxp = coltyp + *n; /* Form the z-vector which consists of the last row of Q_1 and the first row of Q_2. */ ptr = pow_ii(&c__2, tlvls) + 1; i__1 = *curlvl - 1; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *tlvls - i__; ptr += pow_ii(&c__2, &i__2); /* L10: */ } curr = ptr + *curpbm; dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], & givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz + *n], info); /* When solving the final problem, we no longer need the stored data, so we will overwrite the data from this level onto the previously used storage space. */ if (*curlvl == *tlvls) { qptr[curr] = 1; prmptr[curr] = 1; givptr[curr] = 1; } /* Sort and Deflate eigenvalues. */ dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho, cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], & perm[prmptr[curr]], &givptr[curr + 1], &givcol[(givptr[curr] << 1) + 1], &givnum[(givptr[curr] << 1) + 1], &iwork[indxp], &iwork[ indx], info); prmptr[curr + 1] = prmptr[curr] + *n; givptr[curr + 1] += givptr[curr]; /* Solve Secular Equation. */ if (k != 0) { dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda], &work[iw], &qstore[qptr[curr]], &k, info); if (*info != 0) { goto L30; } if (*icompq == 1) { dgemm_("N", "N", qsiz, &k, &k, &c_b15, &work[iq2], &ldq2, &qstore[ qptr[curr]], &k, &c_b29, &q[q_offset], ldq); } /* Computing 2nd power */ i__1 = k; qptr[curr + 1] = qptr[curr] + i__1 * i__1; /* Prepare the INDXQ sorting permutation. 
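   DLAMRG builds a permutation that merges two independently sorted
   lists into a single ascending order; a negative stride means the
   corresponding list is stored in descending order and is traversed
   from its far end.  With illustrative values only: merging
   D = ( 1, 4, 7, 9, 5, 2 ) with N1 = 3, N2 = 3 and strides 1, -1
   gives INDXQ = ( 1, 6, 2, 5, 3, 4 ), which lists D in ascending
   order.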
*/ n1 = k; n2 = *n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); } else { qptr[curr + 1] = qptr[curr]; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indxq[i__] = i__; /* L20: */ } } L30: return 0; /* End of DLAED7 */ } /* dlaed7_ */ /* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda, doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer *indx, integer *info) { /* System generated locals */ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer jlam, imax, jmax; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal c__; static integer i__, j; static doublereal s, t; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer k2, n1, n2; static integer jp; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer n1p1; static doublereal eps, tau, tol; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED8 merges the two sets of eigenvalues together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more eigenvalues are close together or if there is a tiny element in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. K (output) INTEGER The number of non-deflated eigenvalues, and the order of the related secular equation. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the two submatrices to be combined. On exit, the trailing (N-K) updated eigenvalues (those which were deflated) sorted into increasing order. Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) If ICOMPQ = 0, Q is not referenced. Otherwise, on entry, Q contains the eigenvectors of the partially solved system which has been previously updated in matrix multiplies with other partially solved eigensystems. On exit, Q contains the trailing (N-K) updated eigenvectors (those which were deflated) in its last N-K columns. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input) INTEGER array, dimension (N) The permutation which separately sorts the two sub-problems in D into ascending order. 
Note that elements in the second half of this permutation must first have CUTPNT added to their values in order to be accurate. RHO (input/output) DOUBLE PRECISION On entry, the off-diagonal element associated with the rank-1 cut which originally split the two submatrices which are now being recombined. On exit, RHO has been modified to the value required by DLAED3. CUTPNT (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N. Z (input) DOUBLE PRECISION array, dimension (N) On entry, Z contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). On exit, the contents of Z are destroyed by the updating process. DLAMDA (output) DOUBLE PRECISION array, dimension (N) A copy of the first K eigenvalues which will be used by DLAED3 to form the secular equation. Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N) If ICOMPQ = 0, Q2 is not referenced. Otherwise, a copy of the first K eigenvectors which will be used by DLAED7 in a matrix multiply (DGEMM) to update the new eigenvectors. LDQ2 (input) INTEGER The leading dimension of the array Q2. LDQ2 >= max(1,N). W (output) DOUBLE PRECISION array, dimension (N) The first k values of the final deflation-altered z-vector and will be passed to DLAED3. PERM (output) INTEGER array, dimension (N) The permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. GIVCOL (output) INTEGER array, dimension (2, N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) Each number indicates the S value to be used in the corresponding Givens rotation. INDXP (workspace) INTEGER array, dimension (N) The permutation used to place deflated values of D at the end of the array. INDXP(1:K) points to the nondeflated D-values and INDXP(K+1:N) points to the deflated eigenvalues. INDX (workspace) INTEGER array, dimension (N) The permutation used to sort the contents of D into ascending order. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --z__; --dlamda; q2_dim1 = *ldq2; q2_offset = 1 + q2_dim1 * 1; q2 -= q2_offset; --w; --perm; givcol -= 3; givnum -= 3; --indxp; --indx; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*n < 0) { *info = -3; } else if (*icompq == 1 && *qsiz < *n) { *info = -4; } else if (*ldq < max(1,*n)) { *info = -7; } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { *info = -10; } else if (*ldq2 < max(1,*n)) { *info = -14; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED8", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n1 = *cutpnt; n2 = *n - n1; n1p1 = n1 + 1; if (*rho < 0.) { dscal_(&n2, &c_b151, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1 */ t = 1. 
/ sqrt(2.); i__1 = *n; for (j = 1; j <= i__1; ++j) { indx[j] = j; /* L10: */ } dscal_(n, &t, &z__[1], &c__1); *rho = (d__1 = *rho * 2., abs(d__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { indxq[i__] += *cutpnt; /* L20: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; w[i__] = z__[indxq[i__]]; /* L30: */ } i__ = 1; j = *cutpnt + 1; dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = dlamda[indx[i__]]; z__[i__] = w[indx[i__]]; /* L40: */ } /* Calculate the allowable deflation tolerence */ imax = idamax_(n, &z__[1], &c__1); jmax = idamax_(n, &d__[1], &c__1); eps = EPSILON; tol = eps * 8. * (d__1 = d__[jmax], abs(d__1)); /* If the rank-1 modifier is small enough, no more needs to be done except to reorganize Q so that its columns correspond with the elements in D. */ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { *k = 0; if (*icompq == 0) { i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; /* L50: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &c__1); /* L60: */ } dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); } return 0; } /* If there are multiple eigenvalues then the problem deflates. Here the number of equal eigenvalues are found. As each equal eigenvalue is found, an elementary reflector is computed to rotate the corresponding eigensubspace so that the corresponding components of Z are zero in this new basis. */ *k = 0; *givptr = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; if (j == *n) { goto L110; } } else { jlam = j; goto L80; } /* L70: */ } L80: ++j; if (j > *n) { goto L100; } if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[jlam]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); t = d__[j] - d__[jlam]; c__ /= tau; s = -s / tau; if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { /* Deflation is possible. */ z__[j] = tau; z__[jlam] = 0.; /* Record the appropriate Givens rotation */ ++(*givptr); givcol[(*givptr << 1) + 1] = indxq[indx[jlam]]; givcol[(*givptr << 1) + 2] = indxq[indx[j]]; givnum[(*givptr << 1) + 1] = c__; givnum[(*givptr << 1) + 2] = s; if (*icompq == 1) { drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[ indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s); } t = d__[jlam] * c__ * c__ + d__[j] * s * s; d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; d__[jlam] = t; --k2; i__ = 1; L90: if (k2 + i__ <= *n) { if (d__[jlam] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = jlam; ++i__; goto L90; } else { indxp[k2 + i__ - 1] = jlam; } } else { indxp[k2 + i__ - 1] = jlam; } jlam = j; } else { ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; jlam = j; } } goto L80; L100: /* Record the last eigenvalue. */ ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; L110: /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA and Q2 respectively. 
The eigenvalues/vectors which were not deflated go into the first K slots of DLAMDA and Q2 respectively, while those which were deflated go into the last N - K slots. */ if (*icompq == 0) { i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; /* L120: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] , &c__1); /* L130: */ } } /* The deflated eigenvalues and their corresponding vectors go back into the last N - K slots of D and Q respectively. */ if (*k < *n) { if (*icompq == 0) { i__1 = *n - *k; dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); } else { i__1 = *n - *k; dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = *n - *k; dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(* k + 1) * q_dim1 + 1], ldq); } } return 0; /* End of DLAED8 */ } /* dlaed8_ */ /* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop, integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal * rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds, integer *info) { /* System generated locals */ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dlaed4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED9 finds the roots of the secular equation, as defined by the values in D, Z, and RHO, between KSTART and KSTOP. It makes the appropriate calls to DLAED4 and then stores the new matrix of eigenvectors for use in calculating the next level of Z vectors. Arguments ========= K (input) INTEGER The number of terms in the rational function to be solved by DLAED4. K >= 0. KSTART (input) INTEGER KSTOP (input) INTEGER The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP are to be computed. 1 <= KSTART <= KSTOP <= K. N (input) INTEGER The number of rows and columns in the Q matrix. N >= K (delation may result in N > K). D (output) DOUBLE PRECISION array, dimension (N) D(I) contains the updated eigenvalues for KSTART <= I <= KSTOP. Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N) LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max( 1, N ). RHO (input) DOUBLE PRECISION The value of the parameter in the rank one update equation. RHO >= 0 required. DLAMDA (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. W (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating vector. 
S (output) DOUBLE PRECISION array, dimension (LDS, K) Will contain the eigenvectors of the repaired matrix which will be stored for subsequent Z vector calculation and multiplied by the previously accumulated eigenvectors to update the system. LDS (input) INTEGER The leading dimension of S. LDS >= max( 1, K ). INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dlamda; --w; s_dim1 = *lds; s_offset = 1 + s_dim1 * 1; s -= s_offset; /* Function Body */ *info = 0; if (*k < 0) { *info = -1; } else if (*kstart < 1 || *kstart > max(1,*k)) { *info = -2; } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) { *info = -3; } else if (*n < *k) { *info = -4; } else if (*ldq < max(1,*k)) { *info = -7; } else if (*lds < max(1,*k)) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED9", &i__1); return 0; } /* Quick return if possible */ if (*k == 0) { return 0; } /* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), which on any of these machines zeros out the bottommost bit of DLAMDA(I) if it is 1; this makes the subsequent subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DLAMDA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DLAMDA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DLAMBDA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; /* L10: */ } i__1 = *kstop; for (j = *kstart; j <= i__1; ++j) { dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { goto L120; } /* L20: */ } if (*k == 1 || *k == 2) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *k; for (j = 1; j <= i__2; ++j) { s[j + i__ * s_dim1] = q[j + i__ * q_dim1]; /* L30: */ } /* L40: */ } goto L120; } /* Compute updated W. */ dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1); /* Initialize W(I) = Q(I,I) */ i__1 = *ldq + 1; dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L50: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L60: */ } /* L70: */ } i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__1 = sqrt(-w[i__]); w[i__] = d_sign(&d__1, &s[i__ + s_dim1]); /* L80: */ } /* Compute eigenvectors of the modified rank-1 modification. 
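   Each eigenvector of the modified system
   diag( DLAMDA ) + RHO * W * W' belonging to the updated eigenvalue
   LAMBDA(J) is, up to normalization,

      V(I) = W(I) / ( DLAMDA(I) - LAMBDA(J) ),   I = 1, ..., K.

   DLAED4 left the differences DLAMDA(I) - LAMBDA(J) in the J-th
   column of Q, so the loop below forms these quotients in place and
   then normalizes each column by its 2-norm (DNRM2).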
*/ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1]; /* L90: */ } temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1); i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp; /* L100: */ } /* L110: */ } L120: return 0; /* End of DLAED9 */ } /* dlaed9_ */ /* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl, integer *curpbm, integer *prmptr, integer *perm, integer *givptr, integer *givcol, doublereal *givnum, doublereal *q, integer *qptr, doublereal *z__, doublereal *ztemp, integer *info) { /* System generated locals */ integer i__1, i__2, i__3; /* Builtin functions */ integer pow_ii(integer *, integer *); double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer curr, bsiz1, bsiz2, psiz1, psiz2, i__, k, zptr1; extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer mid, ptr; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEDA computes the Z vector corresponding to the merge step in the CURLVLth step of the merge process with TLVLS steps for the CURPBMth problem. Arguments ========= N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. TLVLS (input) INTEGER The total number of merging levels in the overall divide and conquer tree. CURLVL (input) INTEGER The current level in the overall merge routine, 0 <= curlvl <= tlvls. CURPBM (input) INTEGER The current problem in the current level in the overall merge routine (counting from upper left to lower right). PRMPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in PERM a level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) indicates the size of the permutation and incidentally the size of the full, non-deflated problem. PERM (input) INTEGER array, dimension (N lg N) Contains the permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in GIVCOL a level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) indicates the number of Givens rotations. GIVCOL (input) INTEGER array, dimension (2, N lg N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) Each number indicates the S value to be used in the corresponding Givens rotation. Q (input) DOUBLE PRECISION array, dimension (N**2) Contains the square eigenblocks from previous levels, the starting positions for blocks are given by QPTR. QPTR (input) INTEGER array, dimension (N+2) Contains a list of pointers which indicate where in Q an eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates the size of the block. Z (output) DOUBLE PRECISION array, dimension (N) On output this vector contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). ZTEMP (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit. 
< 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --ztemp; --z__; --qptr; --q; givnum -= 3; givcol -= 3; --givptr; --perm; --prmptr; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAEDA", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Determine location of first number in second half. */ mid = *n / 2 + 1; /* Gather last/first rows of appropriate eigenblocks into center of Z */ ptr = 1; /* Determine location of lowest level subproblem in the full storage scheme */ i__1 = *curlvl - 1; curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1; /* Determine size of these matrices. We add HALF to the value of the SQRT in case the machine underestimates one of these square roots. */ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5); bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) + .5); i__1 = mid - bsiz1 - 1; for (k = 1; k <= i__1; ++k) { z__[k] = 0.; /* L10: */ } dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], & c__1); dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1); i__1 = *n; for (k = mid + bsiz2; k <= i__1; ++k) { z__[k] = 0.; /* L20: */ } /* Loop thru remaining levels 1 -> CURLVL applying the Givens rotations and permutation and then multiplying the center matrices against the current Z. */ ptr = pow_ii(&c__2, tlvls) + 1; i__1 = *curlvl - 1; for (k = 1; k <= i__1; ++k) { i__2 = *curlvl - k; i__3 = *curlvl - k - 1; curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) - 1; psiz1 = prmptr[curr + 1] - prmptr[curr]; psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; zptr1 = mid - psiz1; /* Apply Givens at CURR and CURR+1 */ i__2 = givptr[curr + 1] - 1; for (i__ = givptr[curr]; i__ <= i__2; ++i__) { drot_(&c__1, &z__[zptr1 + givcol[(i__ << 1) + 1] - 1], &c__1, & z__[zptr1 + givcol[(i__ << 1) + 2] - 1], &c__1, &givnum[( i__ << 1) + 1], &givnum[(i__ << 1) + 2]); /* L30: */ } i__2 = givptr[curr + 2] - 1; for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) { drot_(&c__1, &z__[mid - 1 + givcol[(i__ << 1) + 1]], &c__1, &z__[ mid - 1 + givcol[(i__ << 1) + 2]], &c__1, &givnum[(i__ << 1) + 1], &givnum[(i__ << 1) + 2]); /* L40: */ } psiz1 = prmptr[curr + 1] - prmptr[curr]; psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; i__2 = psiz1 - 1; for (i__ = 0; i__ <= i__2; ++i__) { ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1]; /* L50: */ } i__2 = psiz2 - 1; for (i__ = 0; i__ <= i__2; ++i__) { ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] - 1]; /* L60: */ } /* Multiply Blocks at CURR and CURR+1 Determine size of these matrices. We add HALF to the value of the SQRT in case the machine underestimates one of these square roots. 
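   For example, a 4-by-4 eigenblock occupies 16 entries of Q, and
   (integer) ( sqrt( 16. ) + .5 ) still evaluates to 4 even if the
   computed square root comes out slightly low, say 3.9999999.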
*/ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5); bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1]) ) + .5); if (bsiz1 > 0) { dgemv_("T", &bsiz1, &bsiz1, &c_b15, &q[qptr[curr]], &bsiz1, & ztemp[1], &c__1, &c_b29, &z__[zptr1], &c__1); } i__2 = psiz1 - bsiz1; dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1); if (bsiz2 > 0) { dgemv_("T", &bsiz2, &bsiz2, &c_b15, &q[qptr[curr + 1]], &bsiz2, & ztemp[psiz1 + 1], &c__1, &c_b29, &z__[mid], &c__1); } i__2 = psiz2 - bsiz2; dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], & c__1); i__2 = *tlvls - k; ptr += pow_ii(&c__2, &i__2); /* L70: */ } return 0; /* End of DLAEDA */ } /* dlaeda_ */ /* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal acmn, acmx, ab, df, cs, ct, tb, sm, tn, rt, adf, acs; static integer sgn1, sgn2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix [ A B ] [ B C ]. On return, RT1 is the eigenvalue of larger absolute value, RT2 is the eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right eigenvector for RT1, giving the decomposition [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ] [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ]. Arguments ========= A (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. B (input) DOUBLE PRECISION The (1,2) element and the conjugate of the (2,1) element of the 2-by-2 matrix. C (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. RT1 (output) DOUBLE PRECISION The eigenvalue of larger absolute value. RT2 (output) DOUBLE PRECISION The eigenvalue of smaller absolute value. CS1 (output) DOUBLE PRECISION SN1 (output) DOUBLE PRECISION The vector (CS1, SN1) is a unit right eigenvector for RT1. Further Details =============== RT1 is accurate to a few ulps barring over/underflow. RT2 may be inaccurate if there is massive cancellation in the determinant A*C-B*B; higher precision or correctly rounded or correctly truncated arithmetic would be needed to compute RT2 accurately in all cases. CS1 and SN1 are accurate to a few ulps barring over/underflow. Overflow is possible only if RT1 is within a factor of 5 of overflow. Underflow is harmless if the input data is 0 or exceeds underflow_threshold / macheps. ===================================================================== Compute the eigenvalues */ sm = *a + *c__; df = *a - *c__; adf = abs(df); tb = *b + *b; ab = abs(tb); if (abs(*a) > abs(*c__)) { acmx = *a; acmn = *c__; } else { acmx = *c__; acmn = *a; } if (adf > ab) { /* Computing 2nd power */ d__1 = ab / adf; rt = adf * sqrt(d__1 * d__1 + 1.); } else if (adf < ab) { /* Computing 2nd power */ d__1 = adf / ab; rt = ab * sqrt(d__1 * d__1 + 1.); } else { /* Includes case AB=ADF=0 */ rt = ab * sqrt(2.); } if (sm < 0.) { *rt1 = (sm - rt) * .5; sgn1 = -1; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. */ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else if (sm > 0.) { *rt1 = (sm + rt) * .5; sgn1 = 1; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. 
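   (The expression below evaluates ( A*C - B*B ) / RT1, i.e. the
   determinant divided by the larger eigenvalue, since the product
   of the eigenvalues equals the determinant; performing the
   divisions by RT1 first helps avoid overflow in the intermediate
   products.)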
*/ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else { /* Includes case RT1 = RT2 = 0 */ *rt1 = rt * .5; *rt2 = rt * -.5; sgn1 = 1; } /* Compute the eigenvector */ if (df >= 0.) { cs = df + rt; sgn2 = 1; } else { cs = df - rt; sgn2 = -1; } acs = abs(cs); if (acs > ab) { ct = -tb / cs; *sn1 = 1. / sqrt(ct * ct + 1.); *cs1 = ct * *sn1; } else { if (ab == 0.) { *cs1 = 1.; *sn1 = 0.; } else { tn = -cs / tb; *cs1 = 1. / sqrt(tn * tn + 1.); *sn1 = tn * *cs1; } } if (sgn1 == sgn2) { tn = *cs1; *cs1 = -(*sn1); *sn1 = tn; } return 0; /* End of DLAEV2 */ } /* dlaev2_ */ /* Subroutine */ int dlaexc_(logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1, integer *n2, doublereal *work, integer *info) { /* System generated locals */ integer q_dim1, q_offset, t_dim1, t_offset, i__1; doublereal d__1, d__2, d__3; /* Local variables */ static integer ierr; static doublereal temp; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal d__[16] /* was [4][4] */; static integer k; static doublereal u[3], scale, x[4] /* was [2][2] */, dnorm; static integer j2, j3, j4; static doublereal xnorm, u1[3], u2[3]; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlasy2_( logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer nd; static doublereal cs, t11, t22; static doublereal t33; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlarfx_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *); static doublereal thresh, smlnum, wi1, wi2, wr1, wr2, eps, tau, tau1, tau2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEXC swaps adjacent diagonal blocks T11 and T22 of order 1 or 2 in an upper quasi-triangular matrix T by an orthogonal similarity transformation. T must be in Schur canonical form, that is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block has its diagonal elemnts equal and its off-diagonal elements of opposite sign. Arguments ========= WANTQ (input) LOGICAL = .TRUE. : accumulate the transformation in the matrix Q; = .FALSE.: do not accumulate the transformation. N (input) INTEGER The order of the matrix T. N >= 0. T (input/output) DOUBLE PRECISION array, dimension (LDT,N) On entry, the upper quasi-triangular matrix T, in Schur canonical form. On exit, the updated matrix T, again in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, if WANTQ is .TRUE., the orthogonal matrix Q. On exit, if WANTQ is .TRUE., the updated matrix Q. If WANTQ is .FALSE., Q is not referenced. LDQ (input) INTEGER The leading dimension of the array Q. 
LDQ >= 1; and if WANTQ is .TRUE., LDQ >= N. J1 (input) INTEGER The index of the first row of the first block T11. N1 (input) INTEGER The order of the first block T11. N1 = 0, 1 or 2. N2 (input) INTEGER The order of the second block T22. N2 = 0, 1 or 2. WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit = 1: the transformed matrix T would be too far from Schur form; the blocks are not swapped and T and Q are unchanged. ===================================================================== */ /* Parameter adjustments */ t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --work; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n == 0 || *n1 == 0 || *n2 == 0) { return 0; } if (*j1 + *n1 > *n) { return 0; } j2 = *j1 + 1; j3 = *j1 + 2; j4 = *j1 + 3; if (*n1 == 1 && *n2 == 1) { /* Swap two 1-by-1 blocks. */ t11 = t[*j1 + *j1 * t_dim1]; t22 = t[j2 + j2 * t_dim1]; /* Determine the transformation to perform the interchange. */ d__1 = t22 - t11; dlartg_(&t[*j1 + j2 * t_dim1], &d__1, &cs, &sn, &temp); /* Apply transformation to the matrix T. */ if (j3 <= *n) { i__1 = *n - *j1 - 1; drot_(&i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1], ldt, &cs, &sn); } i__1 = *j1 - 1; drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &c__1, &cs, &sn); t[*j1 + *j1 * t_dim1] = t22; t[j2 + j2 * t_dim1] = t11; if (*wantq) { /* Accumulate transformation in the matrix Q. */ drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &c__1, &cs, &sn); } } else { /* Swapping involves at least one 2-by-2 block. Copy the diagonal block of order N1+N2 to the local array D and compute its norm. */ nd = *n1 + *n2; dlacpy_("Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &c__4); dnorm = dlange_("Max", &nd, &nd, d__, &c__4, &work[1]); /* Compute machine-dependent threshold for test for accepting swap. */ eps = PRECISION; smlnum = SAFEMINIMUM / eps; /* Computing MAX */ d__1 = eps * 10. * dnorm; thresh = max(d__1,smlnum); /* Solve T11*X - X*T22 = scale*T12 for X. */ dlasy2_(&c_false, &c_false, &c_n1, n1, n2, d__, &c__4, &d__[*n1 + 1 + (*n1 + 1 << 2) - 5], &c__4, &d__[(*n1 + 1 << 2) - 4], &c__4, & scale, x, &c__2, &xnorm, &ierr); /* Swap the adjacent diagonal blocks. */ k = *n1 + *n1 + *n2 - 3; switch (k) { case 1: goto L10; case 2: goto L20; case 3: goto L30; } L10: /* N1 = 1, N2 = 2: generate elementary reflector H so that: ( scale, X11, X12 ) H = ( 0, 0, * ) */ u[0] = scale; u[1] = x[0]; u[2] = x[2]; dlarfg_(&c__3, &u[2], u, &c__1, &tau); u[2] = 1.; t11 = t[*j1 + *j1 * t_dim1]; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__2 = abs(d__[2]), d__3 = abs(d__[6]), d__2 = max(d__2,d__3), d__3 = (d__1 = d__[10] - t11, abs(d__1)); if (max(d__2,d__3) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j2, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]); t[j3 + *j1 * t_dim1] = 0.; t[j3 + j2 * t_dim1] = 0.; t[j3 + j3 * t_dim1] = t11; if (*wantq) { /* Accumulate transformation in the matrix Q. 
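       That is, Q := Q*H, with the elementary reflector H applied from the
       right by DLARFX (added note).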
*/ dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1]); } goto L40; L20: /* N1 = 2, N2 = 1: generate elementary reflector H so that: H ( -X11 ) = ( * ) ( -X21 ) = ( 0 ) ( scale ) = ( 0 ) */ u[0] = -x[0]; u[1] = -x[1]; u[2] = scale; dlarfg_(&c__3, u, &u[1], &c__1, &tau); u[0] = 1.; t33 = t[j3 + j3 * t_dim1]; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__2 = abs(d__[1]), d__3 = abs(d__[2]), d__2 = max(d__2,d__3), d__3 = (d__1 = d__[0] - t33, abs(d__1)); if (max(d__2,d__3) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ dlarfx_("R", &j3, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]); i__1 = *n - *j1; dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[ 1]); t[*j1 + *j1 * t_dim1] = t33; t[j2 + *j1 * t_dim1] = 0.; t[j3 + *j1 * t_dim1] = 0.; if (*wantq) { /* Accumulate transformation in the matrix Q. */ dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1]); } goto L40; L30: /* N1 = 2, N2 = 2: generate elementary reflectors H(1) and H(2) so that: H(2) H(1) ( -X11 -X12 ) = ( * * ) ( -X21 -X22 ) ( 0 * ) ( scale 0 ) ( 0 0 ) ( 0 scale ) ( 0 0 ) */ u1[0] = -x[0]; u1[1] = -x[1]; u1[2] = scale; dlarfg_(&c__3, u1, &u1[1], &c__1, &tau1); u1[0] = 1.; temp = -tau1 * (x[2] + u1[1] * x[3]); u2[0] = -temp * u1[1] - x[3]; u2[1] = -temp * u1[2]; u2[2] = scale; dlarfg_(&c__3, u2, &u2[1], &c__1, &tau2); u2[0] = 1.; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__4, u1, &tau1, d__, &c__4, &work[1]) ; dlarfx_("R", &c__4, &c__3, u1, &tau1, d__, &c__4, &work[1]) ; dlarfx_("L", &c__3, &c__4, u2, &tau2, &d__[1], &c__4, &work[1]); dlarfx_("R", &c__4, &c__3, u2, &tau2, &d__[4], &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__1 = abs(d__[2]), d__2 = abs(d__[6]), d__1 = max(d__1,d__2), d__2 = abs(d__[3]), d__1 = max(d__1,d__2), d__2 = abs(d__[7]); if (max(d__1,d__2) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j4, &c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[ 1]); i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j4, &c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1] ); t[j3 + *j1 * t_dim1] = 0.; t[j3 + j2 * t_dim1] = 0.; t[j4 + *j1 * t_dim1] = 0.; t[j4 + j2 * t_dim1] = 0.; if (*wantq) { /* Accumulate transformation in the matrix Q. 
*/ dlarfx_("R", n, &c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, & work[1]); dlarfx_("R", n, &c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[ 1]); } L40: if (*n2 == 2) { /* Standardize new 2-by-2 block T11 */ dlanv2_(&t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + * j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, & wi2, &cs, &sn); i__1 = *n - *j1 - 1; drot_(&i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2) * t_dim1], ldt, &cs, &sn); i__1 = *j1 - 1; drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], & c__1, &cs, &sn); if (*wantq) { drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], & c__1, &cs, &sn); } } if (*n1 == 2) { /* Standardize new 2-by-2 block T22 */ j3 = *j1 + *n2; j4 = j3 + 1; dlanv2_(&t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 * t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, & cs, &sn); if (j3 + 2 <= *n) { i__1 = *n - j3 - 1; drot_(&i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2) * t_dim1], ldt, &cs, &sn); } i__1 = j3 - 1; drot_(&i__1, &t[j3 * t_dim1 + 1], &c__1, &t[j4 * t_dim1 + 1], & c__1, &cs, &sn); if (*wantq) { drot_(n, &q[j3 * q_dim1 + 1], &c__1, &q[j4 * q_dim1 + 1], & c__1, &cs, &sn); } } } return 0; /* Exit with INFO = 1 if swap was rejected. */ L50: *info = 1; return 0; /* End of DLAEXC */ } /* dlaexc_ */ /* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer i__, j, k, l, m; static doublereal s, v[3]; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i1, i2; static doublereal t1, t2, t3, v2, v3; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal aa, ab, ba, bb; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal h11, h12, h21, h22, cs; static integer nh; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer nr; static doublereal tr; static integer nz; static doublereal safmin, safmax, rtdisc, smlnum, det, h21s; static integer its; static doublereal ulp, sum, tst, rt1i, rt2i, rt1r, rt2r; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAHQR is an auxiliary routine called by DHSEQR to update the eigenvalues and Schur decomposition already computed by DHSEQR, by dealing with the Hessenberg submatrix in rows and columns ILO to IHI. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N >= 0. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper quasi-triangular in rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). DLAHQR works primarily with the Hessenberg submatrix in rows and columns ILO to IHI, but applies transformations to all of H if WANTT is .TRUE.. 1 <= ILO <= max(1,IHI); IHI <= N. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO is zero and if WANTT is .TRUE., H is upper quasi-triangular in rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in standard form. If INFO is zero and WANTT is .FALSE., the contents of H are unspecified on exit. The output state of H if INFO is nonzero is given below under the description of INFO. LDH (input) INTEGER The leading dimension of the array H. LDH >= max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) The real and imaginary parts, respectively, of the computed eigenvalues ILO to IHI are stored in the corresponding elements of WR and WI. If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i), and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) If WANTZ is .TRUE., on entry Z must contain the current matrix Z of transformations accumulated by DHSEQR, and on exit Z has been updated; transformations are applied only to the submatrix Z(ILOZ:IHIZ,ILO:IHI). If WANTZ is .FALSE., Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= max(1,N). INFO (output) INTEGER = 0: successful exit .GT. 0: If INFO = i, DLAHQR failed to compute all the eigenvalues ILO to IHI in a total of 30 iterations per eigenvalue; elements i+1:ihi of WR and WI contain those eigenvalues which have been successfully computed. If INFO .GT. 0 and WANTT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix rows and columns ILO thorugh INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthognal matrix. The final value of H is upper Hessenberg and triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z) = (initial value of Z)*U where U is the orthogonal matrix in (*) (regardless of the value of WANTT.) Further Details =============== 02-96 Based on modifications by David Day, Sandia National Laboratory, USA 12-04 Further modifications by Ralph Byers, University of Kansas, USA This is a modified version of DLAHQR from LAPACK version 3.0. It is (1) more robust against overflow and underflow and (2) adopts the more conservative Ahues & Tisseur stopping criterion (LAWN 122, 1997). 
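    (Added note, restating the criterion as implemented in the code below:
    H(K,K-1) is declared negligible only when

       |H(K,K-1)| * |H(K-1,K)| <= ULP * |H(K,K)| * |H(K-1,K-1) - H(K,K)|

    up to the SMLNUM safeguard and the common scaling by S = AA + AB; this is
    a refinement of the classical test
    |H(K,K-1)| <= ULP * ( |H(K-1,K-1)| + |H(K,K)| ), which is checked first.)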
========================================================= */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n == 0) { return 0; } if (*ilo == *ihi) { wr[*ilo] = h__[*ilo + *ilo * h_dim1]; wi[*ilo] = 0.; return 0; } /* ==== clear out the trash ==== */ i__1 = *ihi - 3; for (j = *ilo; j <= i__1; ++j) { h__[j + 2 + j * h_dim1] = 0.; h__[j + 3 + j * h_dim1] = 0.; /* L10: */ } if (*ilo <= *ihi - 2) { h__[*ihi + (*ihi - 2) * h_dim1] = 0.; } nh = *ihi - *ilo + 1; nz = *ihiz - *iloz + 1; /* Set machine-dependent constants for the stopping criterion. */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) nh / ulp); /* I1 and I2 are the indices of the first row and last column of H to which transformations must be applied. If eigenvalues only are being computed, I1 and I2 are set inside the main loop. */ if (*wantt) { i1 = 1; i2 = *n; } /* The main loop begins here. I is the loop index and decreases from IHI to ILO in steps of 1 or 2. Each iteration of the loop works with the active submatrix in rows and columns L to I. Eigenvalues I+1 to IHI have already converged. Either L = ILO or H(L,L-1) is negligible so that the matrix splits. */ i__ = *ihi; L20: l = *ilo; if (i__ < *ilo) { goto L160; } /* Perform QR iterations on rows and columns ILO to I until a submatrix of order 1 or 2 splits off at the bottom because a subdiagonal element has become negligible. */ for (its = 0; its <= 30; ++its) { /* Look for a single small subdiagonal element. */ i__1 = l + 1; for (k = i__; k >= i__1; --k) { if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= smlnum) { goto L40; } tst = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 = h__[k + k * h_dim1], abs(d__2)); if (tst == 0.) { if (k - 2 >= *ilo) { tst += (d__1 = h__[k - 1 + (k - 2) * h_dim1], abs(d__1)); } if (k + 1 <= *ihi) { tst += (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)); } } /* ==== The following is a conservative small subdiagonal . deflation criterion due to Ahues & Tisseur (LAWN 122, . 1997). It has better mathematical foundation and . improves accuracy in some cases. ==== */ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= ulp * tst) { /* Computing MAX */ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = ( d__2 = h__[k - 1 + k * h_dim1], abs(d__2)); ab = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = ( d__2 = h__[k - 1 + k * h_dim1], abs(d__2)); ba = min(d__3,d__4); /* Computing MAX */ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1], abs(d__2)); aa = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1], abs(d__2)); bb = min(d__3,d__4); s = aa + ab; /* Computing MAX */ d__1 = smlnum, d__2 = ulp * (bb * (aa / s)); if (ba * (ab / s) <= max(d__1,d__2)) { goto L40; } } /* L30: */ } L40: l = k; if (l > *ilo) { /* H(L,L-1) is negligible */ h__[l + (l - 1) * h_dim1] = 0.; } /* Exit from loop if a submatrix of order 1 or 2 has split off. */ if (l >= i__ - 1) { goto L150; } /* Now the active submatrix is in rows and columns L to I. If eigenvalues only are being computed, only the active submatrix need be transformed. */ if (! 
(*wantt)) { i1 = l; i2 = i__; } if (its == 10 || its == 20) { /* Exceptional shift. */ h11 = s * .75 + h__[i__ + i__ * h_dim1]; h12 = s * -.4375; h21 = s; h22 = h11; } else { /* Prepare to use Francis' double shift (i.e. 2nd degree generalized Rayleigh quotient) */ h11 = h__[i__ - 1 + (i__ - 1) * h_dim1]; h21 = h__[i__ + (i__ - 1) * h_dim1]; h12 = h__[i__ - 1 + i__ * h_dim1]; h22 = h__[i__ + i__ * h_dim1]; } s = abs(h11) + abs(h12) + abs(h21) + abs(h22); if (s == 0.) { rt1r = 0.; rt1i = 0.; rt2r = 0.; rt2i = 0.; } else { h11 /= s; h21 /= s; h12 /= s; h22 /= s; tr = (h11 + h22) / 2.; det = (h11 - tr) * (h22 - tr) - h12 * h21; rtdisc = sqrt((abs(det))); if (det >= 0.) { /* ==== complex conjugate shifts ==== */ rt1r = tr * s; rt2r = rt1r; rt1i = rtdisc * s; rt2i = -rt1i; } else { /* ==== real shifts (use only one of them) ==== */ rt1r = tr + rtdisc; rt2r = tr - rtdisc; if ((d__1 = rt1r - h22, abs(d__1)) <= (d__2 = rt2r - h22, abs( d__2))) { rt1r *= s; rt2r = rt1r; } else { rt2r *= s; rt1r = rt2r; } rt1i = 0.; rt2i = 0.; } } /* Look for two consecutive small subdiagonal elements. */ i__1 = l; for (m = i__ - 2; m >= i__1; --m) { /* Determine the effect of starting the double-shift QR iteration at row M, and see if this would make H(M,M-1) negligible. (The following uses scaling to avoid overflows and most underflows.) */ h21s = h__[m + 1 + m * h_dim1]; s = (d__1 = h__[m + m * h_dim1] - rt2r, abs(d__1)) + abs(rt2i) + abs(h21s); h21s = h__[m + 1 + m * h_dim1] / s; v[0] = h21s * h__[m + (m + 1) * h_dim1] + (h__[m + m * h_dim1] - rt1r) * ((h__[m + m * h_dim1] - rt2r) / s) - rt1i * (rt2i / s); v[1] = h21s * (h__[m + m * h_dim1] + h__[m + 1 + (m + 1) * h_dim1] - rt1r - rt2r); v[2] = h21s * h__[m + 2 + (m + 1) * h_dim1]; s = abs(v[0]) + abs(v[1]) + abs(v[2]); v[0] /= s; v[1] /= s; v[2] /= s; if (m == l) { goto L60; } if ((d__1 = h__[m + (m - 1) * h_dim1], abs(d__1)) * (abs(v[1]) + abs(v[2])) <= ulp * abs(v[0]) * ((d__2 = h__[m - 1 + (m - 1) * h_dim1], abs(d__2)) + (d__3 = h__[m + m * h_dim1], abs(d__3)) + (d__4 = h__[m + 1 + (m + 1) * h_dim1], abs( d__4)))) { goto L60; } /* L50: */ } L60: /* Double-shift QR step */ i__1 = i__ - 1; for (k = m; k <= i__1; ++k) { /* The first iteration of this loop determines a reflection G from the vector V and applies it from left and right to H, thus creating a nonzero bulge below the subdiagonal. Each subsequent iteration determines a reflection G to restore the Hessenberg form in the (K-1)th column, and thus chases the bulge one step toward the bottom of the active submatrix. NR is the order of G. Computing MIN */ i__2 = 3, i__3 = i__ - k + 1; nr = min(i__2,i__3); if (k > m) { dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); } dlarfg_(&nr, v, &v[1], &c__1, &t1); if (k > m) { h__[k + (k - 1) * h_dim1] = v[0]; h__[k + 1 + (k - 1) * h_dim1] = 0.; if (k < i__ - 1) { h__[k + 2 + (k - 1) * h_dim1] = 0.; } } else if (m > l) { h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1]; } v2 = v[1]; t2 = t1 * v2; if (nr == 3) { v3 = v[2]; t3 = t1 * v3; /* Apply G from the left to transform the rows of the matrix in columns K to I2. */ i__2 = i2; for (j = k; j <= i__2; ++j) { sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1] + v3 * h__[k + 2 + j * h_dim1]; h__[k + j * h_dim1] -= sum * t1; h__[k + 1 + j * h_dim1] -= sum * t2; h__[k + 2 + j * h_dim1] -= sum * t3; /* L70: */ } /* Apply G from the right to transform the columns of the matrix in rows I1 to min(K+3,I). 
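           (Added note: for NR = 3 the reflector is G = I - tau*v*v' with
           v = (1, V2, V3)', so each row or column update takes the form
           x := x - tau*(v'x)*v; the loops below first form SUM = v'x and then
           subtract SUM*T1, SUM*T2 and SUM*T3, where T2 = T1*V2 and T3 = T1*V3.)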
Computing MIN */ i__3 = k + 3; i__2 = min(i__3,i__); for (j = i1; j <= i__2; ++j) { sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] + v3 * h__[j + (k + 2) * h_dim1]; h__[j + k * h_dim1] -= sum * t1; h__[j + (k + 1) * h_dim1] -= sum * t2; h__[j + (k + 2) * h_dim1] -= sum * t3; /* L80: */ } if (*wantz) { /* Accumulate transformations in the matrix Z */ i__2 = *ihiz; for (j = *iloz; j <= i__2; ++j) { sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * z_dim1] + v3 * z__[j + (k + 2) * z_dim1]; z__[j + k * z_dim1] -= sum * t1; z__[j + (k + 1) * z_dim1] -= sum * t2; z__[j + (k + 2) * z_dim1] -= sum * t3; /* L90: */ } } } else if (nr == 2) { /* Apply G from the left to transform the rows of the matrix in columns K to I2. */ i__2 = i2; for (j = k; j <= i__2; ++j) { sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]; h__[k + j * h_dim1] -= sum * t1; h__[k + 1 + j * h_dim1] -= sum * t2; /* L100: */ } /* Apply G from the right to transform the columns of the matrix in rows I1 to min(K+3,I). */ i__2 = i__; for (j = i1; j <= i__2; ++j) { sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] ; h__[j + k * h_dim1] -= sum * t1; h__[j + (k + 1) * h_dim1] -= sum * t2; /* L110: */ } if (*wantz) { /* Accumulate transformations in the matrix Z */ i__2 = *ihiz; for (j = *iloz; j <= i__2; ++j) { sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * z_dim1]; z__[j + k * z_dim1] -= sum * t1; z__[j + (k + 1) * z_dim1] -= sum * t2; /* L120: */ } } } /* L130: */ } /* L140: */ } /* Failure to converge in remaining number of iterations */ *info = i__; return 0; L150: if (l == i__) { /* H(I,I-1) is negligible: one eigenvalue has converged. */ wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; } else if (l == i__ - 1) { /* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. Transform the 2-by-2 submatrix to standard Schur form, and compute and store the eigenvalues. */ dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ * h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ * h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs, &sn); if (*wantt) { /* Apply the transformation to the rest of H. */ if (i2 > i__) { i__1 = i2 - i__; drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn); } i__1 = i__ - i1 - 1; drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ * h_dim1], &c__1, &cs, &sn); } if (*wantz) { /* Apply the transformation to Z. */ drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz + i__ * z_dim1], &c__1, &cs, &sn); } } /* return to start of the main loop with new value of I. 
*/ i__ = l - 1; goto L20; L160: return 0; /* End of DLAHQR */ } /* dlahqr_ */ /* Subroutine */ int dlahr2_(integer *n, integer *k, integer *nb, doublereal * a, integer *lda, doublereal *tau, doublereal *t, integer *ldt, doublereal *y, integer *ldy) { /* System generated locals */ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ static integer i__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dgemm_(char *, char *, integer *, integer *, integer * , doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dgemv_( char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char *, char *, char *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal ei; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAHR2 reduces the first NB columns of A real general n-BY-(n-k+1) matrix A so that elements below the k-th subdiagonal are zero. The reduction is performed by an orthogonal similarity transformation Q' * A * Q. The routine returns the matrices V and T which determine Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. This is an auxiliary routine called by DGEHRD. Arguments ========= N (input) INTEGER The order of the matrix A. K (input) INTEGER The offset for the reduction. Elements below the k-th subdiagonal in the first NB columns are reduced to zero. K < N. NB (input) INTEGER The number of columns to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1) On entry, the n-by-(n-k+1) general matrix A. On exit, the elements on and above the k-th subdiagonal in the first NB columns are overwritten with the corresponding elements of the reduced matrix; the elements below the k-th subdiagonal, with the array TAU, represent the matrix Q as a product of elementary reflectors. The other columns of A are unchanged. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (NB) The scalar factors of the elementary reflectors. See Further Details. T (output) DOUBLE PRECISION array, dimension (LDT,NB) The upper triangular matrix T. LDT (input) INTEGER The leading dimension of the array T. LDT >= NB. Y (output) DOUBLE PRECISION array, dimension (LDY,NB) The n-by-nb matrix Y. LDY (input) INTEGER The leading dimension of the array Y. LDY >= N. Further Details =============== The matrix Q is represented as a product of nb elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in A(i+k+1:n,i), and tau in TAU(i). 
The elements of the vectors v together form the (n-k+1)-by-nb matrix V which is needed, with T and Y, to apply the transformation to the unreduced part of the matrix, using an update of the form: A := (I - V*T*V') * (A - Y*V'). The contents of A on exit are illustrated by the following example with n = 7, k = 3 and nb = 2: ( a a a a a ) ( a a a a a ) ( a a a a a ) ( h h a a a ) ( v1 h a a a ) ( v1 v2 a a a ) ( v1 v2 a a a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). This file is a slight modification of LAPACK-3.0's DLAHRD incorporating improvements proposed by Quintana-Orti and Van de Gejin. Note that the entries of A(1:K,2:NB) differ from those returned by the original LAPACK routine. This function is not backward compatible with LAPACK3.0. ===================================================================== Quick return if possible */ /* Parameter adjustments */ --tau; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1 * 1; y -= y_offset; /* Function Body */ if (*n <= 1) { return 0; } i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { if (i__ > 1) { /* Update A(K+1:N,I) Update I-th column of A - Y * V' */ i__2 = *n - *k; i__3 = i__ - 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &y[*k + 1 + y_dim1], ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b15, &a[*k + 1 + i__ * a_dim1], &c__1); /* Apply I - V * T' * V' to this column (call it b) from the left, using the last column of T as workspace Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) ( V2 ) ( b2 ) where V1 is unit lower triangular w := V1' * b1 */ i__2 = i__ - 1; dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + 1], &c__1); i__2 = i__ - 1; dtrmv_("Lower", "Transpose", "UNIT", &i__2, &a[*k + 1 + a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1); /* w := w + V2'*b2 */ i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b15, &t[*nb * t_dim1 + 1], &c__1); /* w := T'*w */ i__2 = i__ - 1; dtrmv_("Upper", "Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1); /* b2 := b2 - V2*w */ i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &a[*k + i__ + a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b15, &a[*k + i__ + i__ * a_dim1], &c__1); /* b1 := b1 - V1*w */ i__2 = i__ - 1; dtrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1] , lda, &t[*nb * t_dim1 + 1], &c__1); i__2 = i__ - 1; daxpy_(&i__2, &c_b151, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ * a_dim1], &c__1); a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei; } /* Generate the elementary reflector H(I) to annihilate A(K+I+1:N,I) */ i__2 = *n - *k - i__ + 1; /* Computing MIN */ i__3 = *k + i__ + 1; dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); ei = a[*k + i__ + i__ * a_dim1]; a[*k + i__ + i__ * a_dim1] = 1.; /* Compute Y(K+1:N,I) */ i__2 = *n - *k; i__3 = *n - *k - i__ + 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b15, &a[*k + 1 + (i__ + 1) * a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &y[* k + 1 + i__ * y_dim1], &c__1); i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &t[i__ * t_dim1 + 1], &c__1); i__2 = *n - *k; i__3 = i__ - 1; 
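/*        Added note: the DGEMV below completes the new column of Y,
          Y(K+1:N,I) := TAU(I) * ( A*v - Y(:,1:I-1)*(V'*v) ),
          by subtracting Y(:,1:I-1)*(V'*v) -- the workspace column of T holds
          V'*v at this point -- and the DSCAL that follows scales by TAU(I).
          This assumes, as elsewhere in this translation, that the f2c
          constants satisfy c_b151 = -1. and c_b15 = 1. */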
dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &y[*k + 1 + y_dim1], ldy, &t[i__ * t_dim1 + 1], &c__1, &c_b15, &y[*k + 1 + i__ * y_dim1], &c__1); i__2 = *n - *k; dscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1); /* Compute T(1:I,I) */ i__2 = i__ - 1; d__1 = -tau[i__]; dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1); i__2 = i__ - 1; dtrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1) ; t[i__ + i__ * t_dim1] = tau[i__]; /* L10: */ } a[*k + *nb + *nb * a_dim1] = ei; /* Compute Y(1:K,1:NB) */ dlacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy); dtrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b15, &a[*k + 1 + a_dim1], lda, &y[y_offset], ldy); if (*n > *k + *nb) { i__1 = *n - *k - *nb; dgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b15, &a[(*nb + 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, & c_b15, &y[y_offset], ldy); } dtrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b15, &t[ t_offset], ldt, &y[y_offset], ldy); return 0; /* End of DLAHR2 */ } /* dlahr2_ */ /* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw, doublereal *smin, doublereal *ca, doublereal *a, integer *lda, doublereal *d1, doublereal *d2, doublereal *b, integer *ldb, doublereal *wr, doublereal *wi, doublereal *x, integer *ldx, doublereal *scale, doublereal *xnorm, integer *info) { /* Initialized data */ static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2, 4,3,2,1 }; /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset; doublereal d__1, d__2, d__3, d__4, d__5, d__6; static doublereal equiv_0[4], equiv_1[4]; /* Local variables */ static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s; static integer j; static doublereal u22abs; static integer icmax; static doublereal bnorm, cnorm, smini; #define ci (equiv_0) #define cr (equiv_1) extern /* Subroutine */ int dladiv_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal bignum, bi1, bi2, br1, br2, smlnum, xi1, xi2, xr1, xr2, ci21, ci22, cr21, cr22, li21, csi, ui11, lr21, ui12, ui22; #define civ (equiv_0) static doublereal csr, ur11, ur12, ur22; #define crv (equiv_1) /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALN2 solves a system of the form (ca A - w D ) X = s B or (ca A' - w D) X = s B with possible scaling ("s") and perturbation of A. (A' means A-transpose.) A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA real diagonal matrix, w is a real or complex value, and X and B are NA x 1 matrices -- real if w is real, complex if w is complex. NA may be 1 or 2. If w is complex, X and B are represented as NA x 2 matrices, the first column of each being the real part and the second being the imaginary part. "s" is a scaling factor (.LE. 1), computed by DLALN2, which is so chosen that X can be computed without overflow. X is further scaled if necessary to assure that norm(ca A - w D)*norm(X) is less than overflow. If both singular values of (ca A - w D) are less than SMIN, SMIN*identity will be used instead of (ca A - w D). If only one singular value is less than SMIN, one element of (ca A - w D) will be perturbed enough to make the smallest singular value roughly SMIN. 
If both singular values are at least SMIN, (ca A - w D) will not be perturbed. In any case, the perturbation will be at most some small multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values are computed by infinity-norm approximations, and thus will only be correct to a factor of 2 or so. Note: all input quantities are assumed to be smaller than overflow by a reasonable factor. (See BIGNUM.) Arguments ========== LTRANS (input) LOGICAL =.TRUE.: A-transpose will be used. =.FALSE.: A will be used (not transposed.) NA (input) INTEGER The size of the matrix A. It may (only) be 1 or 2. NW (input) INTEGER 1 if "w" is real, 2 if "w" is complex. It may only be 1 or 2. SMIN (input) DOUBLE PRECISION The desired lower bound on the singular values of A. This should be a safe distance away from underflow or overflow, say, between (underflow/machine precision) and (machine precision * overflow ). (See BIGNUM and ULP.) CA (input) DOUBLE PRECISION The coefficient c, which A is multiplied by. A (input) DOUBLE PRECISION array, dimension (LDA,NA) The NA x NA matrix A. LDA (input) INTEGER The leading dimension of A. It must be at least NA. D1 (input) DOUBLE PRECISION The 1,1 element in the diagonal matrix D. D2 (input) DOUBLE PRECISION The 2,2 element in the diagonal matrix D. Not used if NW=1. B (input) DOUBLE PRECISION array, dimension (LDB,NW) The NA x NW matrix B (right-hand side). If NW=2 ("w" is complex), column 1 contains the real part of B and column 2 contains the imaginary part. LDB (input) INTEGER The leading dimension of B. It must be at least NA. WR (input) DOUBLE PRECISION The real part of the scalar "w". WI (input) DOUBLE PRECISION The imaginary part of the scalar "w". Not used if NW=1. X (output) DOUBLE PRECISION array, dimension (LDX,NW) The NA x NW matrix X (unknowns), as computed by DLALN2. If NW=2 ("w" is complex), on exit, column 1 will contain the real part of X and column 2 will contain the imaginary part. LDX (input) INTEGER The leading dimension of X. It must be at least NA. SCALE (output) DOUBLE PRECISION The scale factor that B must be multiplied by to insure that overflow does not occur when computing X. Thus, (ca A - w D) X will be SCALE*B, not B (ignoring perturbations of A.) It will be at most 1. XNORM (output) DOUBLE PRECISION The infinity-norm of X, when X is regarded as an NA x NW real matrix. INFO (output) INTEGER An error flag. It will be set to zero if no error occurs, a negative number if an argument is in error, or a positive number if ca A - w D had to be perturbed. The possible values are: = 0: No error occurred, and (ca A - w D) did not have to be perturbed. = 1: (ca A - w D) had to be perturbed to make its smallest (or only) singular value greater than SMIN. NOTE: In the interests of speed, this routine does not check the inputs for errors. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; /* Function Body */ /* Compute BIGNUM */ smlnum = 2. * SAFEMINIMUM; bignum = 1. / smlnum; smini = max(*smin,smlnum); /* Don't check for input errors */ *info = 0; /* Standard Initializations */ *scale = 1.; if (*na == 1) { /* 1 x 1 (i.e., scalar) system C X = B */ if (*nw == 1) { /* Real 1x1 system. 
C = ca A - w D */ csr = *ca * a[a_dim1 + 1] - *wr * *d1; cnorm = abs(csr); /* If | C | < SMINI, use C = SMINI */ if (cnorm < smini) { csr = smini; cnorm = smini; *info = 1; } /* Check scaling for X = B / C */ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)); if (cnorm < 1. && bnorm > 1.) { if (bnorm > bignum * cnorm) { *scale = 1. / bnorm; } } /* Compute X */ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); } else { /* Complex 1x1 system (w is complex) C = ca A - w D */ csr = *ca * a[a_dim1 + 1] - *wr * *d1; csi = -(*wi) * *d1; cnorm = abs(csr) + abs(csi); /* If | C | < SMINI, use C = SMINI */ if (cnorm < smini) { csr = smini; csi = 0.; cnorm = smini; *info = 1; } /* Check scaling for X = B / C */ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1) + 1], abs(d__2)); if (cnorm < 1. && bnorm > 1.) { if (bnorm > bignum * cnorm) { *scale = 1. / bnorm; } } /* Compute X */ d__1 = *scale * b[b_dim1 + 1]; d__2 = *scale * b[(b_dim1 << 1) + 1]; dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[(x_dim1 << 1) + 1]); *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) + 1], abs(d__2)); } } else { /* 2x2 System Compute the real part of C = ca A - w D (or ca A' - w D ) */ cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1; cr[3] = *ca * a[(a_dim1 << 1) + 2] - *wr * *d2; if (*ltrans) { cr[2] = *ca * a[a_dim1 + 2]; cr[1] = *ca * a[(a_dim1 << 1) + 1]; } else { cr[1] = *ca * a[a_dim1 + 2]; cr[2] = *ca * a[(a_dim1 << 1) + 1]; } if (*nw == 1) { /* Real 2x2 system (w is real) Find the largest element in C */ cmax = 0.; icmax = 0; for (j = 1; j <= 4; ++j) { if ((d__1 = crv[j - 1], abs(d__1)) > cmax) { cmax = (d__1 = crv[j - 1], abs(d__1)); icmax = j; } /* L10: */ } /* If norm(C) < SMINI, use SMINI*identity. */ if (cmax < smini) { /* Computing MAX */ d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[ b_dim1 + 2], abs(d__2)); bnorm = max(d__3,d__4); if (smini < 1. && bnorm > 1.) { if (bnorm > bignum * smini) { *scale = 1. / bnorm; } } temp = *scale / smini; x[x_dim1 + 1] = temp * b[b_dim1 + 1]; x[x_dim1 + 2] = temp * b[b_dim1 + 2]; *xnorm = temp * bnorm; *info = 1; return 0; } /* Gaussian elimination with complete pivoting. */ ur11 = crv[icmax - 1]; cr21 = crv[ipivot[(icmax << 2) - 3] - 1]; ur12 = crv[ipivot[(icmax << 2) - 2] - 1]; cr22 = crv[ipivot[(icmax << 2) - 1] - 1]; ur11r = 1. / ur11; lr21 = ur11r * cr21; ur22 = cr22 - ur12 * lr21; /* If smaller pivot < SMINI, use SMINI */ if (abs(ur22) < smini) { ur22 = smini; *info = 1; } if (rswap[icmax - 1]) { br1 = b[b_dim1 + 2]; br2 = b[b_dim1 + 1]; } else { br1 = b[b_dim1 + 1]; br2 = b[b_dim1 + 2]; } br2 -= lr21 * br1; /* Computing MAX */ d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2); bbnd = max(d__2,d__3); if (bbnd > 1. && abs(ur22) < 1.) { if (bbnd >= bignum * abs(ur22)) { *scale = 1. / bbnd; } } xr2 = br2 * *scale / ur22; xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12); if (zswap[icmax - 1]) { x[x_dim1 + 1] = xr2; x[x_dim1 + 2] = xr1; } else { x[x_dim1 + 1] = xr1; x[x_dim1 + 2] = xr2; } /* Computing MAX */ d__1 = abs(xr1), d__2 = abs(xr2); *xnorm = max(d__1,d__2); /* Further scaling if norm(A) norm(X) > overflow */ if (*xnorm > 1. && cmax > 1.) 
{ if (*xnorm > bignum / cmax) { temp = cmax / bignum; x[x_dim1 + 1] = temp * x[x_dim1 + 1]; x[x_dim1 + 2] = temp * x[x_dim1 + 2]; *xnorm = temp * *xnorm; *scale = temp * *scale; } } } else { /* Complex 2x2 system (w is complex) Find the largest element in C */ ci[0] = -(*wi) * *d1; ci[1] = 0.; ci[2] = 0.; ci[3] = -(*wi) * *d2; cmax = 0.; icmax = 0; for (j = 1; j <= 4; ++j) { if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs( d__2)) > cmax) { cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1] , abs(d__2)); icmax = j; } /* L20: */ } /* If norm(C) < SMINI, use SMINI*identity. */ if (cmax < smini) { /* Computing MAX */ d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 + 2], abs(d__3)) + (d__4 = b[(b_dim1 << 1) + 2], abs(d__4)); bnorm = max(d__5,d__6); if (smini < 1. && bnorm > 1.) { if (bnorm > bignum * smini) { *scale = 1. / bnorm; } } temp = *scale / smini; x[x_dim1 + 1] = temp * b[b_dim1 + 1]; x[x_dim1 + 2] = temp * b[b_dim1 + 2]; x[(x_dim1 << 1) + 1] = temp * b[(b_dim1 << 1) + 1]; x[(x_dim1 << 1) + 2] = temp * b[(b_dim1 << 1) + 2]; *xnorm = temp * bnorm; *info = 1; return 0; } /* Gaussian elimination with complete pivoting. */ ur11 = crv[icmax - 1]; ui11 = civ[icmax - 1]; cr21 = crv[ipivot[(icmax << 2) - 3] - 1]; ci21 = civ[ipivot[(icmax << 2) - 3] - 1]; ur12 = crv[ipivot[(icmax << 2) - 2] - 1]; ui12 = civ[ipivot[(icmax << 2) - 2] - 1]; cr22 = crv[ipivot[(icmax << 2) - 1] - 1]; ci22 = civ[ipivot[(icmax << 2) - 1] - 1]; if (icmax == 1 || icmax == 4) { /* Code when off-diagonals of pivoted C are real */ if (abs(ur11) > abs(ui11)) { temp = ui11 / ur11; /* Computing 2nd power */ d__1 = temp; ur11r = 1. / (ur11 * (d__1 * d__1 + 1.)); ui11r = -temp * ur11r; } else { temp = ur11 / ui11; /* Computing 2nd power */ d__1 = temp; ui11r = -1. / (ui11 * (d__1 * d__1 + 1.)); ur11r = -temp * ui11r; } lr21 = cr21 * ur11r; li21 = cr21 * ui11r; ur12s = ur12 * ur11r; ui12s = ur12 * ui11r; ur22 = cr22 - ur12 * lr21; ui22 = ci22 - ur12 * li21; } else { /* Code when diagonals of pivoted C are real */ ur11r = 1. / ur11; ui11r = 0.; lr21 = cr21 * ur11r; li21 = ci21 * ur11r; ur12s = ur12 * ur11r; ui12s = ui12 * ur11r; ur22 = cr22 - ur12 * lr21 + ui12 * li21; ui22 = -ur12 * li21 - ui12 * lr21; } u22abs = abs(ur22) + abs(ui22); /* If smaller pivot < SMINI, use SMINI */ if (u22abs < smini) { ur22 = smini; ui22 = 0.; *info = 1; } if (rswap[icmax - 1]) { br2 = b[b_dim1 + 1]; br1 = b[b_dim1 + 2]; bi2 = b[(b_dim1 << 1) + 1]; bi1 = b[(b_dim1 << 1) + 2]; } else { br1 = b[b_dim1 + 1]; br2 = b[b_dim1 + 2]; bi1 = b[(b_dim1 << 1) + 1]; bi2 = b[(b_dim1 << 1) + 2]; } br2 = br2 - lr21 * br1 + li21 * bi1; bi2 = bi2 - li21 * br1 - lr21 * bi1; /* Computing MAX */ d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r)) ), d__2 = abs(br2) + abs(bi2); bbnd = max(d__1,d__2); if (bbnd > 1. && u22abs < 1.) { if (bbnd >= bignum * u22abs) { *scale = 1. 
/ bbnd; br1 = *scale * br1; bi1 = *scale * bi1; br2 = *scale * br2; bi2 = *scale * bi2; } } dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2); xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2; xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2; if (zswap[icmax - 1]) { x[x_dim1 + 1] = xr2; x[x_dim1 + 2] = xr1; x[(x_dim1 << 1) + 1] = xi2; x[(x_dim1 << 1) + 2] = xi1; } else { x[x_dim1 + 1] = xr1; x[x_dim1 + 2] = xr2; x[(x_dim1 << 1) + 1] = xi1; x[(x_dim1 << 1) + 2] = xi2; } /* Computing MAX */ d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2); *xnorm = max(d__1,d__2); /* Further scaling if norm(A) norm(X) > overflow */ if (*xnorm > 1. && cmax > 1.) { if (*xnorm > bignum / cmax) { temp = cmax / bignum; x[x_dim1 + 1] = temp * x[x_dim1 + 1]; x[x_dim1 + 2] = temp * x[x_dim1 + 2]; x[(x_dim1 << 1) + 1] = temp * x[(x_dim1 << 1) + 1]; x[(x_dim1 << 1) + 2] = temp * x[(x_dim1 << 1) + 2]; *xnorm = temp * *xnorm; *scale = temp * *scale; } } } } return 0; /* End of DLALN2 */ } /* dlaln2_ */ #undef crv #undef civ #undef cr #undef ci /* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr, integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal * poles, doublereal *difl, doublereal *difr, doublereal *z__, integer * k, doublereal *c__, doublereal *s, doublereal *work, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, i__1, i__2; doublereal d__1; /* Local variables */ static doublereal temp; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j, m, n; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal diflj, difrj, dsigj; extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal dj; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal dsigjp; static integer nlp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALS0 applies back the multiplying factors of either the left or the right singular vector matrix of a diagonal matrix appended by a row to the right hand side matrix B in solving the least squares problem using the divide-and-conquer SVD approach. For the left singular vector matrix, three types of orthogonal matrices are involved: (1L) Givens rotations: the number of such rotations is GIVPTR; the pairs of columns/rows they were applied to are stored in GIVCOL; and the C- and S-values of these rotations are stored in GIVNUM. (2L) Permutation. The (NL+1)-st row of B is to be moved to the first row, and for J=2:N, PERM(J)-th row of B is to be moved to the J-th row. 
(3L) The left singular vector matrix of the remaining matrix. For the right singular vector matrix, four types of orthogonal matrices are involved: (1R) The right singular vector matrix of the remaining matrix. (2R) If SQRE = 1, one extra Givens rotation to generate the right null space. (3R) The inverse transformation of (2L). (4R) The inverse transformation of (1L). Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form: = 0: Left singular vector matrix. = 1: Right singular vector matrix. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. NRHS (input) INTEGER The number of columns of B and BX. NRHS must be at least 1. B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) On input, B contains the right hand sides of the least squares problem in rows 1 through M. On output, B contains the solution X in rows 1 through N. LDB (input) INTEGER The leading dimension of B. LDB must be at least max(1,MAX( M, N ) ). BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) LDBX (input) INTEGER The leading dimension of BX. PERM (input) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) applied to the two blocks. GIVPTR (input) INTEGER The number of Givens rotations which took place in this subproblem. GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of rows/columns involved in a Givens rotation. LDGCOL (input) INTEGER The leading dimension of GIVCOL, must be at least N. GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value used in the corresponding Givens rotation. LDGNUM (input) INTEGER The leading dimension of arrays DIFR, POLES and GIVNUM, must be at least K. POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) On entry, POLES(1:K, 1) contains the new singular values obtained from solving the secular equation, and POLES(1:K, 2) is an array containing the poles in the secular equation. DIFL (input) DOUBLE PRECISION array, dimension ( K ). On entry, DIFL(I) is the distance between I-th updated (undeflated) singular value and the I-th (undeflated) old singular value. DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). On entry, DIFR(I, 1) contains the distances between I-th updated (undeflated) singular value and the I+1-th (undeflated) old singular value. And DIFR(I, 2) is the normalizing factor for the I-th right singular vector. Z (input) DOUBLE PRECISION array, dimension ( K ) Contain the components of the deflation-adjusted updating row vector. K (input) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. C (input) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (input) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. WORK (workspace) DOUBLE PRECISION array, dimension ( K ) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. 
Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; bx_dim1 = *ldbx; bx_offset = 1 + bx_dim1 * 1; bx -= bx_offset; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; difr_dim1 = *ldgnum; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; poles_dim1 = *ldgnum; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; --difl; --z__; --work; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } n = *nl + *nr + 1; if (*nrhs < 1) { *info = -5; } else if (*ldb < n) { *info = -7; } else if (*ldbx < n) { *info = -9; } else if (*givptr < 0) { *info = -11; } else if (*ldgcol < n) { *info = -13; } else if (*ldgnum < n) { *info = -15; } else if (*k < 1) { *info = -20; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALS0", &i__1); return 0; } m = n + *sqre; nlp1 = *nl + 1; if (*icompq == 0) { /* Apply back orthogonal transformations from the left. Step (1L): apply back the Givens rotations performed. */ i__1 = *givptr; for (i__ = 1; i__ <= i__1; ++i__) { drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]); /* L10: */ } /* Step (2L): permute rows of B. */ dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], ldbx); /* L20: */ } /* Step (3L): apply the inverse of the left singular vector matrix to BX. */ if (*k == 1) { dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb); if (z__[1] < 0.) { dscal_(nrhs, &c_b151, &b[b_offset], ldb); } } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { diflj = difl[j]; dj = poles[j + poles_dim1]; dsigj = -poles[j + (poles_dim1 << 1)]; if (j < *k) { difrj = -difr[j + difr_dim1]; dsigjp = -poles[j + 1 + (poles_dim1 << 1)]; } if (z__[j] == 0. || poles[j + (poles_dim1 << 1)] == 0.) { work[j] = 0.; } else { work[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj / (poles[j + (poles_dim1 << 1)] + dj); } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] == 0.) { work[i__] = 0.; } else { work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigj) - diflj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L30: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] == 0.) { work[i__] = 0.; } else { work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigjp) + difrj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L40: */ } work[1] = -1.; temp = dnrm2_(k, &work[1], &c__1); dgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], & c__1, &c_b29, &b[j + b_dim1], ldb); dlascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j + b_dim1], ldb, info); /* L50: */ } } /* Move the deflated rows of BX to B also. 
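       (Added note: rows K+1 through N of BX correspond to singular values that
       were deflated out of the secular equation, so they are copied to B
       unchanged.)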
*/ if (*k < max(m,n)) { i__1 = n - *k; dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 + b_dim1], ldb); } } else { /* Apply back the right orthogonal transformations. Step (1R): apply back the new right singular vector matrix to B. */ if (*k == 1) { dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx); } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { dsigj = poles[j + (poles_dim1 << 1)]; if (z__[j] == 0.) { work[j] = 0.; } else { work[j] = -z__[j] / difl[j] / (dsigj + poles[j + poles_dim1]) / difr[j + (difr_dim1 << 1)]; } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[j] == 0.) { work[i__] = 0.; } else { d__1 = -poles[i__ + 1 + (poles_dim1 << 1)]; work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[ i__ + difr_dim1]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L60: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[j] == 0.) { work[i__] = 0.; } else { d__1 = -poles[i__ + (poles_dim1 << 1)]; work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[ i__]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L70: */ } dgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], & c__1, &c_b29, &bx[j + bx_dim1], ldbx); /* L80: */ } } /* Step (2R): if SQRE = 1, apply back the rotation that is related to the right null space of the subproblem. */ if (*sqre == 1) { dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx); drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, s); } if (*k < max(m,n)) { i__1 = n - *k; dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + bx_dim1], ldbx); } /* Step (3R): permute rows of B. */ dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb); if (*sqre == 1) { dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb); } i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], ldb); /* L90: */ } /* Step (4R): apply back the Givens rotations performed. 
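       (Added note: the rotations are undone in reverse order, each applied as
       its inverse by negating the S-value -- the D__1 = -GIVNUM term below.)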
*/ for (i__ = *givptr; i__ >= 1; --i__) { d__1 = -givnum[i__ + givnum_dim1]; drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &d__1); /* L100: */ } } return 0; /* End of DLALS0 */ } /* dlals0_ */ /* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n, integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer * ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal * poles, integer *givptr, integer *givcol, integer *ldgcol, integer * perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal * work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1, b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static integer nlvl, sqre, i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer inode, ndiml, ndimr; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i1; extern /* Subroutine */ int dlals0_(integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *); static integer ic, lf, nd, ll, nl, nr; extern /* Subroutine */ int dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer im1, nlf, nrf, lvl, ndb1, nlp1, lvl2, nrp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALSA is an itermediate step in solving the least squares problem by computing the SVD of the coefficient matrix in compact form (The singular vectors are computed as products of simple orthorgonal matrices.). If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector matrix of an upper bidiagonal matrix to the right hand side; and if ICOMPQ = 1, DLALSA applies the right singular vector matrix to the right hand side. The singular vector matrices were generated in compact form by DLALSA. Arguments ========= ICOMPQ (input) INTEGER Specifies whether the left or the right singular vector matrix is involved. = 0: Left singular vector matrix = 1: Right singular vector matrix SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The row and column dimensions of the upper bidiagonal matrix. NRHS (input) INTEGER The number of columns of B and BX. NRHS must be at least 1. B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) On input, B contains the right hand sides of the least squares problem in rows 1 through M. On output, B contains the solution X in rows 1 through N. LDB (input) INTEGER The leading dimension of B in the calling subprogram. LDB must be at least max(1,MAX( M, N ) ). 
BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) On exit, the result of applying the left or right singular vector matrix to B. LDBX (input) INTEGER The leading dimension of BX. U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). On entry, U contains the left singular vector matrices of all subproblems at the bottom level. LDU (input) INTEGER, LDU = > N. The leading dimension of arrays U, VT, DIFL, DIFR, POLES, GIVNUM, and Z. VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). On entry, VT' contains the right singular vector matrices of all subproblems at the bottom level. K (input) INTEGER array, dimension ( N ). DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record distances between singular values on the I-th level and singular values on the (I -1)-th level, and DIFR(*, 2 * I) record the normalizing factors of the right singular vectors matrices of subproblems on I-th level. Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). On entry, Z(1, I) contains the components of the deflation- adjusted updating row vector for subproblems on the I-th level. POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old singular values involved in the secular equations on the I-th level. GIVPTR (input) INTEGER array, dimension ( N ). On entry, GIVPTR( I ) records the number of Givens rotations performed on the I-th problem on the computation tree. GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the locations of Givens rotations performed on the I-th level on the computation tree. LDGCOL (input) INTEGER, LDGCOL = > N. The leading dimension of arrays GIVCOL and PERM. PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). On entry, PERM(*, I) records permutations done on the I-th level of the computation tree. GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- values of Givens rotations performed on the I-th level on the computation tree. C (input) DOUBLE PRECISION array, dimension ( N ). On entry, if the I-th subproblem is not square, C( I ) contains the C-value of a Givens rotation related to the right null space of the I-th subproblem. S (input) DOUBLE PRECISION array, dimension ( N ). On entry, if the I-th subproblem is not square, S( I ) contains the S-value of a Givens rotation related to the right null space of the I-th subproblem. WORK (workspace) DOUBLE PRECISION array. The dimension must be at least N. IWORK (workspace) INTEGER array. The dimension must be at least 3 * N INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. 
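   (The checks that follow enforce 0 <= ICOMPQ <= 1, SMLSIZ >= 3,
   N >= SMLSIZ, NRHS >= 1 and leading dimensions of at least N
   for B, BX, U and GIVCOL.  As a worked instance of the
   dimension formulas above: with N = 1000 and SMLSIZ = 25,
   NLVL = INT(log_2(1000/26)) + 1 = 5 + 1 = 6, so DIFL, Z and
   PERM have 6 columns while DIFR, POLES, GIVCOL and GIVNUM
   have 12.)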
*/ /* Parameter adjustments */ b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; bx_dim1 = *ldbx; bx_offset = 1 + bx_dim1 * 1; bx -= bx_offset; givnum_dim1 = *ldu; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; poles_dim1 = *ldu; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; z_dim1 = *ldu; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; difr_dim1 = *ldu; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; difl_dim1 = *ldu; difl_offset = 1 + difl_dim1 * 1; difl -= difl_offset; vt_dim1 = *ldu; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; --k; --givptr; perm_dim1 = *ldgcol; perm_offset = 1 + perm_dim1 * 1; perm -= perm_offset; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; --c__; --s; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*smlsiz < 3) { *info = -2; } else if (*n < *smlsiz) { *info = -3; } else if (*nrhs < 1) { *info = -4; } else if (*ldb < *n) { *info = -6; } else if (*ldbx < *n) { *info = -8; } else if (*ldu < *n) { *info = -10; } else if (*ldgcol < *n) { *info = -19; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALSA", &i__1); return 0; } /* Book-keeping and setting up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* The following code applies back the left singular vector factors. For applying back the right singular vector factors, go to 50. */ if (*icompq == 1) { goto L50; } /* The nodes on the bottom level of the tree were solved by DLASDQ. The corresponding left and right singular vector matrices are in explicit form. First apply back the left singular vector matrices. */ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nr = iwork[ndimr + i1]; nlf = ic - nl; nrf = ic + 1; dgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[ nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); dgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[ nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); /* L10: */ } /* Next copy the rows of B that correspond to unchanged rows in the bidiagonal matrix to BX. */ i__1 = nd; for (i__ = 1; i__ <= i__1; ++i__) { ic = iwork[inode + i__ - 1]; dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx); /* L20: */ } /* Finally go through the left singular vector matrices of all the other subproblems bottom-up on the tree. 
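   (On level LVL the tree nodes are numbered LF = 2**(LVL-1)
   through LL = 2**LVL - 1, so for NLVL = 3 the bottom-up sweep
   below visits nodes 4..7, then 2..3, then node 1.  The counter
   J starts at 2**NLVL and is decremented before each DLALS0
   call, so GIVPTR(J), K(J), C(J) and S(J) always address the
   data saved for the node being processed.)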
*/ j = pow_ii(&c__2, &nlvl); sqre = 0; for (lvl = nlvl; lvl >= 1; --lvl) { lvl2 = (lvl << 1) - 1; /* find the first node LF and last node LL on the current level LVL */ if (lvl == 1) { lf = 1; ll = 1; } else { i__1 = lvl - 1; lf = pow_ii(&c__2, &i__1); ll = (lf << 1) - 1; } i__1 = ll; for (i__ = lf; i__ <= i__1; ++i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; nrf = ic + 1; --j; dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, & b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], & givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ j], &s[j], &work[1], info); /* L30: */ } /* L40: */ } goto L90; /* ICOMPQ = 1: applying back the right singular vector factors. */ L50: /* First now go through the right singular vector matrices of all the tree nodes top-down. */ j = 0; i__1 = nlvl; for (lvl = 1; lvl <= i__1; ++lvl) { lvl2 = (lvl << 1) - 1; /* Find the first node LF and last node LL on the current level LVL. */ if (lvl == 1) { lf = 1; ll = 1; } else { i__2 = lvl - 1; lf = pow_ii(&c__2, &i__2); ll = (lf << 1) - 1; } i__2 = lf; for (i__ = ll; i__ >= i__2; --i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; nrf = ic + 1; if (i__ == ll) { sqre = 0; } else { sqre = 1; } ++j; dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], & givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ j], &s[j], &work[1], info); /* L60: */ } /* L70: */ } /* The nodes on the bottom level of the tree were solved by DLASDQ. The corresponding right singular vector matrices are in explicit form. Apply them back. 
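   (In contrast to the left pass, the explicit right factors at
   the bottom level are (NL+1)-by-(NL+1) and, for a non-square
   subproblem, (NR+1)-by-(NR+1); the loop below therefore works
   with NLP1 = NL + 1 and with NRP1 equal to NR or NR + 1.
   Since VT stores the transposed right factor, DGEMM is called
   with its first argument set to "T".)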
*/ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nr = iwork[ndimr + i1]; nlp1 = nl + 1; if (i__ == nd) { nrp1 = nr; } else { nrp1 = nr + 1; } nlf = ic - nl; nrf = ic + 1; dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu, &b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu, &b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); /* L80: */ } L90: return 0; /* End of DLALSA */ } /* dlalsa_ */ /* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb, doublereal *rcond, integer *rank, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer b_dim1, b_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double log(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer difl, difr; static doublereal rcnd; static integer perm, nsub; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer nlvl, sqre, bxst, c__, i__, j, k; static doublereal r__; static integer s, u; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer z__; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer poles, sizei, nsize, nwork, icmpq1, icmpq2; static doublereal cs; extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer bx; extern /* Subroutine */ int dlalsa_(integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); extern integer idamax_(integer *, doublereal *, integer *); static integer st; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer vt; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer givcol; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static doublereal orgnrm; static integer givnum, givptr, nm1, smlszp, st1; static doublereal eps; static integer iwk; static doublereal tol; /* -- LAPACK routine 
(version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALSD uses the singular value decomposition of A to solve the least squares problem of finding X to minimize the Euclidean norm of each column of A*X-B, where A is N-by-N upper bidiagonal, and X and B are N-by-NRHS. The solution X overwrites B. The singular values of A smaller than RCOND times the largest singular value are treated as zero in solving the least squares problem; in this case a minimum norm solution is returned. The actual singular values are returned in D in ascending order. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= UPLO (input) CHARACTER*1 = 'U': D and E define an upper bidiagonal matrix. = 'L': D and E define a lower bidiagonal matrix. SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The dimension of the bidiagonal matrix. N >= 0. NRHS (input) INTEGER The number of columns of B. NRHS must be at least 1. D (input/output) DOUBLE PRECISION array, dimension (N) On entry D contains the main diagonal of the bidiagonal matrix. On exit, if INFO = 0, D contains its singular values. E (input/output) DOUBLE PRECISION array, dimension (N-1) Contains the super-diagonal entries of the bidiagonal matrix. On exit, E has been destroyed. B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On input, B contains the right hand sides of the least squares problem. On output, B contains the solution X. LDB (input) INTEGER The leading dimension of B in the calling subprogram. LDB must be at least max(1,N). RCOND (input) DOUBLE PRECISION The singular values of A less than or equal to RCOND times the largest singular value are treated as zero in solving the least squares problem. If RCOND is negative, machine precision is used instead. For example, if diag(S)*X=B were the least squares problem, where diag(S) is a diagonal matrix of singular values, the solution would be X(i) = B(i) / S(i) if S(i) is greater than RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to RCOND*max(S). RANK (output) INTEGER The number of singular values of A greater than RCOND times the largest singular value. WORK (workspace) DOUBLE PRECISION array, dimension at least (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2), where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1). IWORK (workspace) INTEGER array, dimension at least (3*N*NLVL + 11*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an singular value while working on the submatrix lying in rows and columns INFO/(N+1) through MOD(INFO,N+1). Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. 
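   (The checks that follow reject N < 0, NRHS < 1 and
   LDB < max(1,N).)  For illustration, a minimal, hypothetical
   driver sketch for a 3-by-3 upper bidiagonal system with one
   right hand side; the numerical values are placeholders only:

       integer smlsiz = 25, n = 3, nrhs = 1, ldb = 3, rank, info;
       integer iwork[11 * 3];                 IWORK bound, NLVL = 0
       doublereal d[3] = { 2., 1., .5 };      main diagonal
       doublereal e[2] = { .1, .1 };          super-diagonal
       doublereal b[3] = { 1., 1., 1. };      RHS, overwritten by X
       doublereal rcond = -1.;                use machine precision
       doublereal work[9*3 + 2*3*25 + 3*1 + 26*26];
       dlalsd_("U", &smlsiz, &n, &nrhs, d, e, b, &ldb,
               &rcond, &rank, work, iwork, &info);

   Here N <= SMLSIZ, so NLVL = max(0, INT(log_2(3/26)) + 1) = 0
   and the WORK bound reduces to 9*N + 2*N*SMLSIZ + N*NRHS +
   (SMLSIZ+1)**2 = 856 doubles.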
*/ /* Parameter adjustments */ --d__; --e; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; --work; --iwork; /* Function Body */ *info = 0; if (*n < 0) { *info = -3; } else if (*nrhs < 1) { *info = -4; } else if (*ldb < 1 || *ldb < *n) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALSD", &i__1); return 0; } eps = EPSILON; /* Set up the tolerance. */ if (*rcond <= 0. || *rcond >= 1.) { rcnd = eps; } else { rcnd = *rcond; } *rank = 0; /* Quick return if possible. */ if (*n == 0) { return 0; } else if (*n == 1) { if (d__[1] == 0.) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); } else { *rank = 1; dlascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[ b_offset], ldb, info); d__[1] = abs(d__[1]); } return 0; } /* Rotate the matrix if it is lower bidiagonal. */ if (*(unsigned char *)uplo == 'L') { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (*nrhs == 1) { drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], & c__1, &cs, &sn); } else { work[(i__ << 1) - 1] = cs; work[i__ * 2] = sn; } /* L10: */ } if (*nrhs > 1) { i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *n - 1; for (j = 1; j <= i__2; ++j) { cs = work[(j << 1) - 1]; sn = work[j * 2]; drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ * b_dim1], &c__1, &cs, &sn); /* L20: */ } /* L30: */ } } } /* Scale. */ nm1 = *n - 1; orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { dlaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); return 0; } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, info); /* If N is smaller than the minimum divide size SMLSIZ, then solve the problem with another solver. */ if (*n <= *smlsiz) { nwork = *n * *n + 1; dlaset_("A", n, n, &c_b29, &c_b15, &work[1], n); dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, & work[1], n, &b[b_offset], ldb, &work[nwork], info); if (*info != 0) { return 0; } tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] <= tol) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1], ldb); } else { dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[ i__ + b_dim1], ldb, info); ++(*rank); } /* L40: */ } dgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, & c_b29, &work[nwork], n); dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb); /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info); dlasrt_("D", n, &d__[1], info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb, info); return 0; } /* Book-keeping and setting up some constants. 
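   (The real workspace is partitioned below into consecutive
   panels U, VT, DIFL, DIFR, Z, C, S, POLES, GIVNUM and BX, with
   NWORK marking the first free scratch element; IWORK likewise
   holds the subproblem starts, then SIZEI, K, GIVPTR, PERM,
   GIVCOL and a scratch region starting at IWK.  In the
   subproblem loop further down, the offset ST1 = ST - 1 shifts
   each panel to the rows of the current subproblem.)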
*/ nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) / log(2.)) + 1; smlszp = *smlsiz + 1; u = 1; vt = *smlsiz * *n + 1; difl = vt + smlszp * *n; difr = difl + nlvl * *n; z__ = difr + (nlvl * *n << 1); c__ = z__ + nlvl * *n; s = c__ + *n; poles = s + *n; givnum = poles + (nlvl << 1) * *n; bx = givnum + (nlvl << 1) * *n; nwork = bx + *n * *nrhs; sizei = *n + 1; k = sizei + *n; givptr = k + *n; perm = givptr + *n; givcol = perm + nlvl * *n; iwk = givcol + (nlvl * *n << 1); st = 1; sqre = 0; icmpq1 = 1; icmpq2 = 0; nsub = 0; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) < eps) { d__[i__] = d_sign(&eps, &d__[i__]); } /* L50: */ } i__1 = nm1; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { ++nsub; iwork[nsub] = st; /* Subproblem found. First determine its size and then apply divide and conquer on it. */ if (i__ < nm1) { /* A subproblem with E(I) small for I < NM1. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; } else if ((d__1 = e[i__], abs(d__1)) >= eps) { /* A subproblem with E(NM1) not too small but I = NM1. */ nsize = *n - st + 1; iwork[sizei + nsub - 1] = nsize; } else { /* A subproblem with E(NM1) small. This implies an 1-by-1 subproblem at D(N), which is not solved explicitly. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; ++nsub; iwork[nsub] = *n; iwork[sizei + nsub - 1] = 1; dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n); } st1 = st - 1; if (nsize == 1) { /* This is a 1-by-1 subproblem and is not solved explicitly. */ dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else if (nsize <= *smlsiz) { /* This is a small subproblem and is solved by DLASDQ. */ dlaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1], n); dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[ st], &work[vt + st1], n, &work[nwork], n, &b[st + b_dim1], ldb, &work[nwork], info); if (*info != 0) { return 0; } dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else { /* A large problem. Solve it using divide and conquer. */ dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], & work[u + st1], n, &work[vt + st1], &iwork[k + st1], & work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[iwk], info); if (*info != 0) { return 0; } bxst = bx + st1; dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, & work[bxst], n, &work[u + st1], n, &work[vt + st1], & iwork[k + st1], &work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[givcol + st1], n, &iwork[perm + st1], & work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[iwk], info); if (*info != 0) { return 0; } } st = i__ + 1; } /* L60: */ } /* Apply the singular values and treat the tiny ones as zero. */ tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Some of the elements in D can be negative because 1-by-1 subproblems were not solved explicitly. */ if ((d__1 = d__[i__], abs(d__1)) <= tol) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n); } else { ++(*rank); dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[ bx + i__ - 1], n, info); } d__[i__] = (d__1 = d__[i__], abs(d__1)); /* L70: */ } /* Now apply back the right singular vectors. 
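   (The loop below mirrors the forward pass in three cases: a
   1-by-1 subproblem needs only a DCOPY; a subproblem of size at
   most SMLSIZ is multiplied by its explicit VT factor with
   DGEMM; anything larger calls DLALSA with ICMPQ2 = 1 to apply
   the compactly stored right singular vector factor.)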
*/ icmpq2 = 1; i__1 = nsub; for (i__ = 1; i__ <= i__1; ++i__) { st = iwork[i__]; st1 = st - 1; nsize = iwork[sizei + i__ - 1]; bxst = bx + st1; if (nsize == 1) { dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb); } else if (nsize <= *smlsiz) { dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n, &work[bxst], n, &c_b29, &b[st + b_dim1], ldb); } else { dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st + b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[ k + st1], &work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[ iwk], info); if (*info != 0) { return 0; } } /* L80: */ } /* Unscale and sort the singular values. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info); dlasrt_("D", n, &d__[1], info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb, info); return 0; /* End of DLALSD */ } /* dlalsd_ */ /* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer *dtrd1, integer *dtrd2, integer *index) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, ind1, ind2, n1sv, n2sv; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAMRG will create a permutation list which will merge the elements of A (which is composed of two independently sorted sets) into a single set which is sorted in ascending order. Arguments ========= N1 (input) INTEGER N2 (input) INTEGER These arguements contain the respective lengths of the two sorted lists to be merged. A (input) DOUBLE PRECISION array, dimension (N1+N2) The first N1 elements of A contain a list of numbers which are sorted in either ascending or descending order. Likewise for the final N2 elements. DTRD1 (input) INTEGER DTRD2 (input) INTEGER These are the strides to be taken through the array A. Allowable strides are 1 and -1. They indicate whether a subset of A is sorted in ascending (DTRDx = 1) or descending (DTRDx = -1) order. INDEX (output) INTEGER array, dimension (N1+N2) On exit this array will contain a permutation such that if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be sorted in ascending order. ===================================================================== */ /* Parameter adjustments */ --index; --a; /* Function Body */ n1sv = *n1; n2sv = *n2; if (*dtrd1 > 0) { ind1 = 1; } else { ind1 = *n1; } if (*dtrd2 > 0) { ind2 = *n1 + 1; } else { ind2 = *n1 + *n2; } i__ = 1; /* while ( (N1SV > 0) & (N2SV > 0) ) */ L10: if (n1sv > 0 && n2sv > 0) { if (a[ind1] <= a[ind2]) { index[i__] = ind1; ++i__; ind1 += *dtrd1; --n1sv; } else { index[i__] = ind2; ++i__; ind2 += *dtrd2; --n2sv; } goto L10; } /* end while */ if (n1sv == 0) { i__1 = n2sv; for (n1sv = 1; n1sv <= i__1; ++n1sv) { index[i__] = ind2; ++i__; ind2 += *dtrd2; /* L20: */ } } else { /* N2SV .EQ. 
0 */ i__1 = n1sv; for (n2sv = 1; n2sv <= i__1; ++n2sv) { index[i__] = ind1; ++i__; ind1 += *dtrd1; /* L30: */ } } return 0; /* End of DLAMRG */ } /* dlamrg_ */ doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer *lda, doublereal *work) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer i__, j; static doublereal scale; extern logical lsame_(char *, char *); static doublereal value; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description =========== DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANGE as described above. M (input) INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. N (input) INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. A (input) DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(M,1). WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --work; /* Function Body */ if (min(*m,*n) == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ value = 0.; i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)); value = max(d__2,d__3); /* L10: */ } /* L20: */ } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1') { /* Find norm1(A). */ value = 0.; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = 0.; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { sum += (d__1 = a[i__ + j * a_dim1], abs(d__1)); /* L30: */ } value = max(value,sum); /* L40: */ } } else if (lsame_(norm, "I")) { /* Find normI(A). */ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L50: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)); /* L60: */ } /* L70: */ } value = 0.; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L80: */ } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). 
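   (DLASSQ maintains a pair (SCALE, SUMSQ) with the invariant
   SCALE**2 * SUMSQ = sum of squares seen so far, which avoids
   both overflow and destructive underflow.  In outline, for
   each nonzero x(i):

       if (abs(x(i)) > scale) then
           sumsq = 1 + sumsq * (scale / abs(x(i)))**2
           scale = abs(x(i))
       else
           sumsq = sumsq + (abs(x(i)) / scale)**2

   The Frobenius norm is then SCALE * SQRT(SUMSQ), as formed
   below.)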
*/ scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum); /* L90: */ } value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of DLANGE */ } /* dlange_ */ doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e) { /* System generated locals */ integer i__1; doublereal ret_val, d__1, d__2, d__3, d__4, d__5; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer i__; static doublereal scale; extern logical lsame_(char *, char *); static doublereal anorm; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANST returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric tridiagonal matrix A. Description =========== DLANST returns the value DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANST as described above. N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, DLANST is set to zero. D (input) DOUBLE PRECISION array, dimension (N) The diagonal elements of A. E (input) DOUBLE PRECISION array, dimension (N-1) The (n-1) sub-diagonal or super-diagonal elements of A. ===================================================================== */ /* Parameter adjustments */ --e; --d__; /* Function Body */ if (*n <= 0) { anorm = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ anorm = (d__1 = d__[*n], abs(d__1)); i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1)); anorm = max(d__2,d__3); /* Computing MAX */ d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1)); anorm = max(d__2,d__3); /* L10: */ } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1' || lsame_(norm, "I")) { /* Find norm1(A). */ if (*n == 1) { anorm = abs(d__[1]); } else { /* Computing MAX */ d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs( d__1)) + (d__2 = d__[*n], abs(d__2)); anorm = max(d__3,d__4); i__1 = *n - 1; for (i__ = 2; i__ <= i__1; ++i__) { /* Computing MAX */ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[ i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3)); anorm = max(d__4,d__5); /* L20: */ } } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). 
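   (A symmetric tridiagonal matrix contains each off-diagonal
   entry E(i) twice, once below and once above the diagonal, so
   the code below accumulates the squares of E once, doubles the
   running sum, and only then folds in the diagonal D.)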
*/ scale = 0.; sum = 1.; if (*n > 1) { i__1 = *n - 1; dlassq_(&i__1, &e[1], &c__1, &scale, &sum); sum *= 2; } dlassq_(n, &d__[1], &c__1, &scale, &sum); anorm = scale * sqrt(sum); } ret_val = anorm; return ret_val; /* End of DLANST */ } /* dlanst_ */ doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer *lda, doublereal *work) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal absa; static integer i__, j; static doublereal scale; extern logical lsame_(char *, char *); static doublereal value; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. Description =========== DLANSY returns the value DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANSY as described above. UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. = 'U': Upper triangular part of A is referenced = 'L': Lower triangular part of A is referenced N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, DLANSY is set to zero. A (input) DOUBLE PRECISION array, dimension (LDA,N) The symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(N,1). WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, WORK is not referenced. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --work; /* Function Body */ if (*n == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( d__1)); value = max(d__2,d__3); /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( d__1)); value = max(d__2,d__3); /* L30: */ } /* L40: */ } } } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') { /* Find normI(A) ( = norm1(A), since A is symmetric). 
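   (Only the stored triangle is traversed: for UPLO = 'U',
   column J contributes its strict upper part both to WORK(I),
   the partial row sums, and to the running column sum SUM, and
   the diagonal entry is added once.  Each stored entry is thus
   read a single time even though it represents two entries of
   the symmetric matrix A.)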
*/ value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); sum += absa; work[i__] += absa; /* L50: */ } work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1)); /* L60: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L70: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L80: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1)); i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); sum += absa; work[i__] += absa; /* L90: */ } value = max(value,sum); /* L100: */ } } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). */ scale = 0.; sum = 1.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 2; j <= i__1; ++j) { i__2 = j - 1; dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); /* L110: */ } } else { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = *n - j; dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum); /* L120: */ } } sum *= 2; i__1 = *lda + 1; dlassq_(n, &a[a_offset], &i__1, &scale, &sum); value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of DLANSY */ } /* dlansy_ */ /* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r, doublereal *rt2i, doublereal *cs, doublereal *sn) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double d_sign(doublereal *, doublereal *), sqrt(doublereal); /* Local variables */ static doublereal temp, p, scale, bcmax, z__, bcmis, sigma; extern doublereal dlapy2_(doublereal *, doublereal *); static doublereal aa, bb, cc, dd; static doublereal cs1, sn1, sab, sac, eps, tau; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric matrix in standard form: [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ] [ C D ] [ SN CS ] [ CC DD ] [-SN CS ] where either 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex conjugate eigenvalues. Arguments ========= A (input/output) DOUBLE PRECISION B (input/output) DOUBLE PRECISION C (input/output) DOUBLE PRECISION D (input/output) DOUBLE PRECISION On entry, the elements of the input matrix. On exit, they are overwritten by the elements of the standardised Schur form. RT1R (output) DOUBLE PRECISION RT1I (output) DOUBLE PRECISION RT2R (output) DOUBLE PRECISION RT2I (output) DOUBLE PRECISION The real and imaginary parts of the eigenvalues. If the eigenvalues are a complex conjugate pair, RT1I > 0. CS (output) DOUBLE PRECISION SN (output) DOUBLE PRECISION Parameters of the rotation matrix. Further Details =============== Modified by V. Sima, Research Institute for Informatics, Bucharest, Romania, to reduce the risk of cancellation errors, when computing real eigenvalues, and to ensure, if possible, that abs(RT1R) >= abs(RT2R). ===================================================================== */ eps = PRECISION; if (*c__ == 0.) { *cs = 1.; *sn = 0.; goto L10; } else if (*b == 0.) 
{ /* Swap rows and columns */ *cs = 0.; *sn = 1.; temp = *d__; *d__ = *a; *a = temp; *b = -(*c__); *c__ = 0.; goto L10; } else if (*a - *d__ == 0. && d_sign(&c_b15, b) != d_sign(&c_b15, c__)) { *cs = 1.; *sn = 0.; goto L10; } else { temp = *a - *d__; p = temp * .5; /* Computing MAX */ d__1 = abs(*b), d__2 = abs(*c__); bcmax = max(d__1,d__2); /* Computing MIN */ d__1 = abs(*b), d__2 = abs(*c__); bcmis = min(d__1,d__2) * d_sign(&c_b15, b) * d_sign(&c_b15, c__); /* Computing MAX */ d__1 = abs(p); scale = max(d__1,bcmax); z__ = p / scale * p + bcmax / scale * bcmis; /* If Z is of the order of the machine accuracy, postpone the decision on the nature of eigenvalues */ if (z__ >= eps * 4.) { /* Real eigenvalues. Compute A and D. */ d__1 = sqrt(scale) * sqrt(z__); z__ = p + d_sign(&d__1, &p); *a = *d__ + z__; *d__ -= bcmax / z__ * bcmis; /* Compute B and the rotation matrix */ tau = dlapy2_(c__, &z__); *cs = z__ / tau; *sn = *c__ / tau; *b -= *c__; *c__ = 0.; } else { /* Complex eigenvalues, or real (almost) equal eigenvalues. Make diagonal elements equal. */ sigma = *b + *c__; tau = dlapy2_(&sigma, &temp); *cs = sqrt((abs(sigma) / tau + 1.) * .5); *sn = -(p / (tau * *cs)) * d_sign(&c_b15, &sigma); /* Compute [ AA BB ] = [ A B ] [ CS -SN ] [ CC DD ] [ C D ] [ SN CS ] */ aa = *a * *cs + *b * *sn; bb = -(*a) * *sn + *b * *cs; cc = *c__ * *cs + *d__ * *sn; dd = -(*c__) * *sn + *d__ * *cs; /* Compute [ A B ] = [ CS SN ] [ AA BB ] [ C D ] [-SN CS ] [ CC DD ] */ *a = aa * *cs + cc * *sn; *b = bb * *cs + dd * *sn; *c__ = -aa * *sn + cc * *cs; *d__ = -bb * *sn + dd * *cs; temp = (*a + *d__) * .5; *a = temp; *d__ = temp; if (*c__ != 0.) { if (*b != 0.) { if (d_sign(&c_b15, b) == d_sign(&c_b15, c__)) { /* Real eigenvalues: reduce to upper triangular form */ sab = sqrt((abs(*b))); sac = sqrt((abs(*c__))); d__1 = sab * sac; p = d_sign(&d__1, c__); tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1))); *a = temp + p; *d__ = temp - p; *b -= *c__; *c__ = 0.; cs1 = sab * tau; sn1 = sac * tau; temp = *cs * cs1 - *sn * sn1; *sn = *cs * sn1 + *sn * cs1; *cs = temp; } } else { *b = -(*c__); *c__ = 0.; temp = *cs; *cs = -(*sn); *sn = temp; } } } } L10: /* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */ *rt1r = *a; *rt2r = *d__; if (*c__ == 0.) { *rt1i = 0.; *rt2i = 0.; } else { *rt1i = sqrt((abs(*b))) * sqrt((abs(*c__))); *rt2i = -(*rt1i); } return 0; /* End of DLANV2 */ } /* dlanv2_ */ doublereal dlapy2_(doublereal *x, doublereal *y) { /* System generated locals */ doublereal ret_val, d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal xabs, yabs, w, z__; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary overflow. Arguments ========= X (input) DOUBLE PRECISION Y (input) DOUBLE PRECISION X and Y specify the values x and y. ===================================================================== */ xabs = abs(*x); yabs = abs(*y); w = max(xabs,yabs); z__ = min(xabs,yabs); if (z__ == 0.) 
{ ret_val = w; } else { /* Computing 2nd power */ d__1 = z__ / w; ret_val = w * sqrt(d__1 * d__1 + 1.); } return ret_val; /* End of DLAPY2 */ } /* dlapy2_ */ /* Subroutine */ int dlaqr0_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static integer ndfl, kbot, nmin; static doublereal swap; static integer ktop; static doublereal zdum[1] /* was [1][1] */; static integer kacc22, i__, k; static logical nwinc; static integer itmax, nsmax, nwmax, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr3_( logical *, logical *, integer *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaqr4_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dlaqr5_(logical *, logical *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *); static doublereal aa, bb, cc, dd; static integer ld; static doublereal cs; static integer nh, nibble, it, ks, kt; static doublereal sn; static integer ku, kv, ls, ns; static doublereal ss; static integer nw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static char jbcmpz[2]; static logical sorted; static integer lwkopt, inf, kdu, nho, nve, kwh, nsr, nwr, kwv; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAQR0 computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N .GE. 0. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, H(ILO,ILO-1) is zero. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise, ILO and IHI should be set to 1 and N, respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO = 0 and WANTT is .TRUE., then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is .FALSE., then the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) This subroutine may explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (IHI) WI (output) DOUBLE PRECISION array, dimension (IHI) The real and imaginary parts, respectively, of the computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored WR(ILO:IHI) and WI(ILO:IHI). If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .FALSE., then Z is not referenced. If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal Schur factor of H(ILO:IHI,ILO:IHI). (The output value of Z when INFO.GT.0 is given under the description of INFO below.) LDZ (input) INTEGER The leading dimension of the array Z. if WANTZ is .TRUE. then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension LWORK On exit, if LWORK = -1, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DLAQR0 does a workspace query. In this case, DLAQR0 checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. INFO (output) INTEGER = 0: successful exit .GT. 0: if INFO = i, DLAQR0 failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 
0 and WANT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigen- values of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z(ILO:IHI,ILOZ:IHIZ) = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal matrix in (*) (regard- less of the value of WANTT.) If INFO .GT. 0 and WANTZ is .FALSE., then Z is not accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== Exceptional deflation windows: try to cure rare . slow convergence by increasing the size of the . deflation window after KEXNW iterations. ===== ==== Exceptional shifts: try to cure rare slow convergence . with ad-hoc exceptional shifts every KEXSH iterations. . The constants WILK1 and WILK2 are used to form the . exceptional shifts. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; /* ==== Quick return for N = 0: nothing to do. ==== */ if (*n == 0) { work[1] = 1.; return 0; } /* ==== Set up job flags for ILAENV. ==== */ if (*wantt) { *(unsigned char *)jbcmpz = 'S'; } else { *(unsigned char *)jbcmpz = 'E'; } if (*wantz) { *(unsigned char *)&jbcmpz[1] = 'V'; } else { *(unsigned char *)&jbcmpz[1] = 'N'; } /* ==== Tiny matrices must use DLAHQR. ==== */ if (*n <= 11) { /* ==== Estimate optimal workspace. ==== */ lwkopt = 1; if (*lwork != -1) { dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], & wi[1], iloz, ihiz, &z__[z_offset], ldz, info); } } else { /* ==== Use small bulge multi-shift QR with aggressive early . deflation on larger-than-tiny matrices. ==== ==== Hope for the best. ==== */ *info = 0; /* ==== NWR = recommended deflation window size. At this . point, N .GT. NTINY = 11, so there is enough . subdiagonal workspace for NWR.GE.2 as required. . (In fact, there is enough subdiagonal space for . NWR.GE.3.) ==== */ nwr = ilaenv_(&c__13, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nwr = max(2,nwr); /* Computing MIN */ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2); nwr = min(i__1,nwr); nw = nwr; /* ==== NSR = recommended number of simultaneous shifts. . At this point N .GT. NTINY = 11, so there is at . enough subdiagonal workspace for NSR to be even . and greater than or equal to two as required. 
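.
.      For instance, with N = 100, ILO = 1 and IHI = 100 the
.      caps applied below are (N-1)/3 = 33 on the deflation
.      window and (N+6)/9 = 11 on the shift count, after which
.      NSR is rounded down to an even number, but never below 2.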
==== */ nsr = ilaenv_(&c__15, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); /* Computing MIN */ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi - *ilo; nsr = min(i__1,i__2); /* Computing MAX */ i__1 = 2, i__2 = nsr - nsr % 2; nsr = max(i__1,i__2); /* ==== Estimate optimal workspace ==== ==== Workspace query call to DLAQR3 ==== */ i__1 = nwr + 1; dlaqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1], &c_n1); /* ==== Optimal workspace = MAX(DLAQR5, DLAQR3) ==== Computing MAX */ i__1 = nsr * 3 / 2, i__2 = (integer) work[1]; lwkopt = max(i__1,i__2); /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== */ nmin = ilaenv_(&c__12, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen) 6, (ftnlen)2); nmin = max(11,nmin); /* ==== Nibble crossover point ==== */ nibble = ilaenv_(&c__14, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); nibble = max(0,nibble); /* ==== Accumulate reflections during ttswp? Use block . 2-by-2 structure during matrix-matrix multiply? ==== */ kacc22 = ilaenv_(&c__16, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); kacc22 = max(0,kacc22); kacc22 = min(2,kacc22); /* ==== NWMAX = the largest possible deflation window for . which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n - 1) / 3, i__2 = *lwork / 2; nwmax = min(i__1,i__2); /* ==== NSMAX = the Largest number of simultaneous shifts . for which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3; nsmax = min(i__1,i__2); nsmax -= nsmax % 2; /* ==== NDFL: an iteration count restarted at deflation. ==== */ ndfl = 1; /* ==== ITMAX = iteration limit ==== Computing MAX */ i__1 = 10, i__2 = *ihi - *ilo + 1; itmax = 30 * max(i__1,i__2); /* ==== Last row and column in the active block ==== */ kbot = *ihi; /* ==== Main Loop ==== */ i__1 = itmax; for (it = 1; it <= i__1; ++it) { /* ==== Done when KBOT falls below ILO ==== */ if (kbot < *ilo) { goto L90; } /* ==== Locate active block ==== */ i__2 = *ilo + 1; for (k = kbot; k >= i__2; --k) { if (h__[k + (k - 1) * h_dim1] == 0.) { goto L20; } /* L10: */ } k = *ilo; L20: ktop = k; /* ==== Select deflation window size ==== */ nh = kbot - ktop + 1; if (ndfl < 5 || nh < nw) { /* ==== Typical deflation window. If possible and . advisable, nibble the entire active block. . If not, use size NWR or NWR+1 depending upon . which has the smaller corresponding subdiagonal . entry (a heuristic). ==== */ nwinc = TRUE_; if (nh <= min(nmin,nwmax)) { nw = nh; } else { /* Computing MIN */ i__2 = min(nwr,nh); nw = min(i__2,nwmax); if (nw < nwmax) { if (nw >= nh - 1) { nw = nh; } else { kwtop = kbot - nw + 1; if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1)) > (d__2 = h__[kwtop - 1 + ( kwtop - 2) * h_dim1], abs(d__2))) { ++nw; } } } } } else { /* ==== Exceptional deflation window. If there have . been no deflations in KEXNW or more iterations, . then vary the deflation window size. At first, . because, larger windows are, in general, more . powerful than smaller ones, rapidly increase the . window up to the maximum reasonable and possible. . Then maybe try a slightly smaller window. 
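.
.      Concretely, while NWINC is set the window is grown as
.      NW = MIN(2*NW, NWMAX, NH); once growth is exhausted,
.      NWINC is cleared and a window of NH - 1 is tried when
.      the active block has NH > 2 rows.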
==== */ if (nwinc && nw < min(nwmax,nh)) { /* Computing MIN */ i__2 = min(nwmax,nh), i__3 = nw << 1; nw = min(i__2,i__3); } else { nwinc = FALSE_; if (nw == nh && nh > 2) { nw = nh - 1; } } } /* ==== Aggressive early deflation: . split workspace under the subdiagonal into . - an nw-by-nw work array V in the lower . left-hand-corner, . - an NW-by-at-least-NW-but-more-is-better . (NW-by-NHO) horizontal work array along . the bottom edge, . - an at-least-NW-but-more-is-better (NHV-by-NW) . vertical work array along the left-hand-edge. . ==== */ kv = *n - nw + 1; kt = nw + 1; nho = *n - nw - 1 - kt + 1; kwv = nw + 2; nve = *n - nw - kwv + 1; /* ==== Aggressive early deflation ==== */ dlaqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork); /* ==== Adjust KBOT accounting for new deflations. ==== */ kbot -= ld; /* ==== KS points to the shifts. ==== */ ks = kbot - ls + 1; /* ==== Skip an expensive QR sweep if there is a (partly . heuristic) reason to expect that many eigenvalues . will deflate without it. Here, the QR sweep is . skipped if many eigenvalues have just been deflated . or if the remaining active block is small. */ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min( nmin,nwmax)) { /* ==== NS = nominal number of simultaneous shifts. . This may be lowered (slightly) if DLAQR3 . did not provide that many shifts. ==== Computing MIN Computing MAX */ i__4 = 2, i__5 = kbot - ktop; i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5); ns = min(i__2,i__3); ns -= ns % 2; /* ==== If there have been no deflations . in a multiple of KEXSH iterations, . then try exceptional shifts. . Otherwise use shifts provided by . DLAQR3 above or from the eigenvalues . of a trailing principal submatrix. ==== */ if (ndfl % 6 == 0) { ks = kbot - ns + 1; /* Computing MAX */ i__3 = ks + 1, i__4 = ktop + 2; i__2 = max(i__3,i__4); for (i__ = kbot; i__ >= i__2; i__ += -2) { ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2)); aa = ss * .75 + h__[i__ + i__ * h_dim1]; bb = ss; cc = ss * -.4375; dd = aa; dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1] , &wr[i__], &wi[i__], &cs, &sn); /* L30: */ } if (ks == ktop) { wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1]; wi[ks + 1] = 0.; wr[ks] = wr[ks + 1]; wi[ks] = wi[ks + 1]; } } else { /* ==== Got NS/2 or fewer shifts? Use DLAQR4 or . DLAHQR on a trailing principal submatrix to . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, . there is enough space below the subdiagonal . to fit an NS-by-NS scratch array.) ==== */ if (kbot - ks + 1 <= ns / 2) { ks = kbot - ns + 1; kt = *n - ns + 1; dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, & h__[kt + h_dim1], ldh); if (ns > nmin) { dlaqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[ kt + h_dim1], ldh, &wr[ks], &wi[ks], & c__1, &c__1, zdum, &c__1, &work[1], lwork, &inf); } else { dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[ kt + h_dim1], ldh, &wr[ks], &wi[ks], & c__1, &c__1, zdum, &c__1, &inf); } ks += inf; /* ==== In case of a rare QR failure use . eigenvalues of the trailing 2-by-2 . principal submatrix. 
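.          (Editor's note, not part of the original LAPACK source:
.          dlanv2_ computes the real Schur factorization of the
.          2-by-2 matrix [aa bb; cc dd]; only its eigenvalue
.          outputs wr/wi are used here, and the returned rotation
.          cs/sn is discarded.)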
==== */ if (ks >= kbot) { aa = h__[kbot - 1 + (kbot - 1) * h_dim1]; cc = h__[kbot + (kbot - 1) * h_dim1]; bb = h__[kbot - 1 + kbot * h_dim1]; dd = h__[kbot + kbot * h_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn) ; ks = kbot - 1; } } if (kbot - ks + 1 > ns) { /* ==== Sort the shifts (Helps a little) . Bubble sort keeps complex conjugate . pairs together. ==== */ sorted = FALSE_; i__2 = ks + 1; for (k = kbot; k >= i__2; --k) { if (sorted) { goto L60; } sorted = TRUE_; i__3 = k - 1; for (i__ = ks; i__ <= i__3; ++i__) { if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[ i__], abs(d__2)) < (d__3 = wr[i__ + 1] , abs(d__3)) + (d__4 = wi[i__ + 1], abs(d__4))) { sorted = FALSE_; swap = wr[i__]; wr[i__] = wr[i__ + 1]; wr[i__ + 1] = swap; swap = wi[i__]; wi[i__] = wi[i__ + 1]; wi[i__ + 1] = swap; } /* L40: */ } /* L50: */ } L60: ; } /* ==== Shuffle shifts into pairs of real shifts . and pairs of complex conjugate shifts . assuming complex conjugate shifts are . already adjacent to one another. (Yes, . they are.) ==== */ i__2 = ks + 2; for (i__ = kbot; i__ >= i__2; i__ += -2) { if (wi[i__] != -wi[i__ - 1]) { swap = wr[i__]; wr[i__] = wr[i__ - 1]; wr[i__ - 1] = wr[i__ - 2]; wr[i__ - 2] = swap; swap = wi[i__]; wi[i__] = wi[i__ - 1]; wi[i__ - 1] = wi[i__ - 2]; wi[i__ - 2] = swap; } /* L70: */ } } /* ==== If there are only two shifts and both are . real, then use only one. ==== */ if (kbot - ks + 1 == 2) { if (wi[kbot] == 0.) { if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs( d__1)) < (d__2 = wr[kbot - 1] - h__[kbot + kbot * h_dim1], abs(d__2))) { wr[kbot - 1] = wr[kbot]; } else { wr[kbot] = wr[kbot - 1]; } } } /* ==== Use up to NS of the the smallest magnatiude . shifts. If there aren't NS shifts available, . then use them all, possibly dropping one to . make the number of shifts even. ==== Computing MIN */ i__2 = ns, i__3 = kbot - ks + 1; ns = min(i__2,i__3); ns -= ns % 2; ks = kbot - ns + 1; /* ==== Small-bulge multi-shift QR sweep: . split workspace under the subdiagonal into . - a KDU-by-KDU work array U in the lower . left-hand-corner, . - a KDU-by-at-least-KDU-but-more-is-better . (KDU-by-NHo) horizontal work array WH along . the bottom edge, . - and an at-least-KDU-but-more-is-better-by-KDU . (NVE-by-KDU) vertical work WV arrow along . the left-hand-edge. ==== */ kdu = ns * 3 - 3; ku = *n - kdu + 1; kwh = kdu + 1; nho = *n - kdu - 3 - (kdu + 1) + 1; kwv = kdu + 4; nve = *n - kdu - kwv + 1; /* ==== Small-bulge multi-shift QR sweep ==== */ dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks], &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1], ldh); } /* ==== Note progress (or the lack of it). ==== */ if (ld > 0) { ndfl = 1; } else { ++ndfl; } /* ==== End of main loop ==== L80: */ } /* ==== Iteration limit exceeded. Set INFO to show where . the problem occurred and exit. ==== */ *info = kbot; L90: ; } /* ==== Return the optimal value of LWORK. ==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR0 ==== */ return 0; } /* dlaqr0_ */ /* Subroutine */ int dlaqr1_(integer *n, doublereal *h__, integer *ldh, doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2, doublereal *v) { /* System generated locals */ integer h_dim1, h_offset; doublereal d__1, d__2, d__3; /* Local variables */ static doublereal s, h21s, h31s; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. 
of California Berkeley and NAG Ltd.. November 2006 Given a 2-by-2 or 3-by-3 matrix H, DLAQR1 sets v to a scalar multiple of the first column of the product (*) K = (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) scaling to avoid overflows and most underflows. It is assumed that either 1) sr1 = sr2 and si1 = -si2 or 2) si1 = si2 = 0. This is useful for starting double implicit shift bulges in the QR algorithm. N (input) integer Order of the matrix H. N must be either 2 or 3. H (input) DOUBLE PRECISION array of dimension (LDH,N) The 2-by-2 or 3-by-3 matrix H in (*). LDH (input) integer The leading dimension of H as declared in the calling procedure. LDH.GE.N SR1 (input) DOUBLE PRECISION SI1 The shifts in (*). SR2 SI2 V (output) DOUBLE PRECISION array of dimension N A scalar multiple of the first column of the matrix K in (*). ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --v; /* Function Body */ if (*n == 2) { s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = h__[h_dim1 + 2], abs(d__2)); if (s == 0.) { v[1] = 0.; v[2] = 0.; } else { h21s = h__[h_dim1 + 2] / s; v[1] = h21s * h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s); v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * sr2); } } else { s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = h__[h_dim1 + 2], abs(d__2)) + (d__3 = h__[h_dim1 + 3], abs( d__3)); if (s == 0.) { v[1] = 0.; v[2] = 0.; v[3] = 0.; } else { h21s = h__[h_dim1 + 2] / s; h31s = h__[h_dim1 + 3] / s; v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[ h_dim1 * 3 + 1] * h31s; v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * sr2) + h__[h_dim1 * 3 + 2] * h31s; v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - * sr2) + h21s * h__[(h_dim1 << 1) + 3]; } } return 0; } /* dlaqr1_ */ /* Subroutine */ int dlaqr2_(logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) { /* System generated locals */ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta; static integer kend, kcol, info, ifst, ilst, ltop, krow, i__, j, k; static doublereal s; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dgemm_(char *, char *, integer *, integer * , integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical bulge; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer infqr, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal 
*, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal aa, bb, cc; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal dd, cs; extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer jw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dtrexc_(char *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static logical sorted; static doublereal smlnum; static integer lwkopt; static doublereal evi, evk, foo; static integer kln; static doublereal tau, ulp; static integer lwk1, lwk2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 This subroutine is identical to DLAQR3 except that it avoids recursion by calling DLAHQR instead of DLAQR4. ****************************************************************** Aggressive early deflation: This subroutine accepts as input an upper Hessenberg matrix H and performs an orthogonal similarity transformation designed to detect and deflate fully converged eigenvalues from a trailing principal submatrix. On output H has been overwritten by a new Hessenberg matrix that is a perturbation of an orthogonal similarity transformation of H. It is to be hoped that the final version of H has many zero subdiagonal entries. ****************************************************************** WANTT (input) LOGICAL If .TRUE., then the Hessenberg matrix H is fully updated so that the quasi-triangular Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then only enough of H is updated to preserve the eigenvalues. WANTZ (input) LOGICAL If .TRUE., then the orthogonal matrix Z is updated so that the orthogonal Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then Z is not referenced. N (input) INTEGER The order of the matrix H and (if WANTZ is .TRUE.) the order of the orthogonal matrix Z. KTOP (input) INTEGER It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. KBOT (input) INTEGER It is assumed without a check that either KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. NW (input) INTEGER Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On input the initial N-by-N section of H stores the Hessenberg matrix undergoing aggressive early deflation. On output H has been transformed by an orthogonal similarity transformation, perturbed, and then returned to Hessenberg form that (it is to be hoped) has some zero subdiagonal entries.
LDH (input) integer Leading dimension of H just as declared in the calling subroutine. N .LE. LDH ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .TRUE., then on output, the orthogonal similarity transformation mentioned above has been accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ is .FALSE., then Z is unreferenced. LDZ (input) integer The leading dimension of Z just as declared in the calling subroutine. 1 .LE. LDZ. NS (output) integer The number of unconverged (i.e. approximate) eigenvalues returned in SR and SI that may be used as shifts by the calling subroutine. ND (output) integer The number of converged eigenvalues uncovered by this subroutine. SR (output) DOUBLE PRECISION array, dimension KBOT SI (output) DOUBLE PRECISION array, dimension KBOT On output, the real and imaginary parts of approximate eigenvalues that may be used for shifts are stored in SR(KBOT-ND-NS+1) through SR(KBOT-ND) and SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. The real and imaginary parts of converged eigenvalues are stored in SR(KBOT-ND+1) through SR(KBOT) and SI(KBOT-ND+1) through SI(KBOT), respectively. V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) An NW-by-NW work array. LDV (input) integer scalar The leading dimension of V just as declared in the calling subroutine. NW .LE. LDV NH (input) integer scalar The number of columns of T. NH.GE.NW. T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) LDT (input) integer The leading dimension of T just as declared in the calling subroutine. NW .LE. LDT NV (input) integer The number of rows of work array WV available for workspace. NV.GE.NW. WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) LDWV (input) integer The leading dimension of WV just as declared in the calling subroutine. NW .LE. LDWV WORK (workspace) DOUBLE PRECISION array, dimension LWORK. On exit, WORK(1) is set to an estimate of the optimal value of LWORK for the given values of N, NW, KTOP and KBOT. LWORK (input) integer The dimension of the work array WORK. LWORK = 2*NW suffices, but greater efficiency may result from larger values of LWORK. If LWORK = -1, then a workspace query is assumed; DLAQR2 only estimates the optimal workspace size for the given values of N, NW, KTOP and KBOT. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z is accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ ==== Estimate optimal workspace.
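   (Editor's sketch, not part of the original LAPACK source: the
   LWORK = -1 query described above follows the usual LAPACK
   workspace-query convention. Assuming the f2c typedefs used in
   this file and caller-owned arrays h, z, sr, si, v, t and wv
   with the leading dimensions documented above, a query might
   look like

       integer lwork = -1, ns, nd;
       doublereal wkopt;
       dlaqr2_(&wantt, &wantz, &n, &ktop, &kbot, &nw,
               h, &ldh, &iloz, &ihiz, z, &ldz, &ns, &nd,
               sr, si, v, &ldv, &nh, t, &ldt, &nv,
               wv, &ldwv, &wkopt, &lwork);
       lwork = (integer) wkopt;

   after which WORK can be allocated with LWORK entries and the
   call repeated to perform the actual deflation.)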
==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --sr; --si; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; --work; /* Function Body */ /* Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); if (jw <= 2) { lwkopt = 1; } else { /* ==== Workspace query call to DGEHRD ==== */ i__1 = jw - 1; dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk1 = (integer) work[1]; /* ==== Workspace query call to DORGHR ==== */ i__1 = jw - 1; dorghr_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk2 = (integer) work[1]; /* ==== Optimal workspace ==== */ lwkopt = jw + max(lwk1,lwk2); } /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== Nothing to do ... ... for an empty active block ... ==== */ *ns = 0; *nd = 0; if (*ktop > *kbot) { return 0; } /* ... nor for an empty deflation window. ==== */ if (*nw < 1) { return 0; } /* ==== Machine constants ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Setup deflation window ==== Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); kwtop = *kbot - jw + 1; if (kwtop == *ktop) { s = 0.; } else { s = h__[kwtop + (kwtop - 1) * h_dim1]; } if (*kbot == kwtop) { /* ==== 1-by-1 deflation window: not much to do ==== */ sr[kwtop] = h__[kwtop + kwtop * h_dim1]; si[kwtop] = 0.; *ns = 1; *nd = 0; /* Computing MAX */ d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( d__1)); if (abs(s) <= max(d__2,d__3)) { *ns = 0; *nd = 1; if (kwtop > *ktop) { h__[kwtop + (kwtop - 1) * h_dim1] = 0.; } } return 0; } /* ==== Convert to spike-triangular form. (In case of a . rare QR failure, this routine continues to do . aggressive early deflation using that part of . the deflation window that converged using INFQR . here and there to keep track.) ==== */ dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt); i__1 = jw - 1; i__2 = *ldh + 1; i__3 = *ldt + 1; dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3); dlaset_("A", &jw, &jw, &c_b29, &c_b15, &v[v_offset], ldv); dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr); /* ==== DTREXC needs a clean margin near the diagonal ==== */ i__1 = jw - 3; for (j = 1; j <= i__1; ++j) { t[j + 2 + j * t_dim1] = 0.; t[j + 3 + j * t_dim1] = 0.; /* L10: */ } if (jw > 2) { t[jw + (jw - 2) * t_dim1] = 0.; } /* ==== Deflation detection loop ==== */ *ns = jw; ilst = infqr + 1; L20: if (ilst <= *ns) { if (*ns == 1) { bulge = FALSE_; } else { bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; } /* ==== Small spike tip test for deflation ==== */ if (! bulge) { /* ==== Real eigenvalue ==== */ foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * foo; if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) { /* ==== Deflatable ==== */ --(*ns); } else { /* ==== Undeflatable. Move it up out of the way. . (DTREXC can not fail in this case.) 
==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ++ilst; } } else { /* ==== Complex conjugate pair ==== */ foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* ns - 1 + *ns * t_dim1], abs(d__2))); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); /* Computing MAX */ d__5 = smlnum, d__6 = ulp * foo; if (max(d__3,d__4) <= max(d__5,d__6)) { /* ==== Deflatable ==== */ *ns += -2; } else { /* ==== Undflatable. Move them up out of the way. . Fortunately, DTREXC does the right thing with . ILST in case of a rare exchange failure. ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ilst += 2; } } /* ==== End deflation detection loop ==== */ goto L20; } /* ==== Return to Hessenberg form ==== */ if (*ns == 0) { s = 0.; } if (*ns < jw) { /* ==== sorting diagonal blocks of T improves accuracy for . graded matrices. Bubble sort deals well with . exchange failures. ==== */ sorted = FALSE_; i__ = *ns + 1; L30: if (sorted) { goto L50; } sorted = TRUE_; kend = i__ - 1; i__ = infqr + 1; if (i__ == *ns) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } L40: if (k <= kend) { if (k == i__ + 1) { evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); } else { evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = t[i__ + (i__ + 1) * t_dim1], abs(d__2))); } if (k == kend) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else if (t[k + 1 + k * t_dim1] == 0.) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else { evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + (k + 1) * t_dim1], abs(d__2))); } if (evi >= evk) { i__ = k; } else { sorted = FALSE_; ifst = i__; ilst = k; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); if (info == 0) { i__ = ilst; } else { i__ = k; } } if (i__ == kend) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } goto L40; } goto L30; L50: ; } /* ==== Restore shift/eigenvalue array from T ==== */ i__ = jw; L60: if (i__ >= infqr + 1) { if (i__ == infqr + 1) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else { aa = t[i__ - 1 + (i__ - 1) * t_dim1]; cc = t[i__ + (i__ - 1) * t_dim1]; bb = t[i__ - 1 + i__ * t_dim1]; dd = t[i__ + i__ * t_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & sn); i__ += -2; } goto L60; } if (*ns < jw || s == 0.) { if (*ns > 1 && s != 0.) 
{ /* ==== Reflect spike back into lower triangle ==== */ dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1); beta = work[1]; dlarfg_(ns, &beta, &work[2], &c__1, &tau); work[1] = 1.; i__1 = jw - 2; i__2 = jw - 2; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &t[t_dim1 + 3], ldt); dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, & work[jw + 1]); i__1 = *lwork - jw; dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); } /* ==== Copy updated reduced window into place ==== */ if (kwtop > 1) { h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; } dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1] , ldh); i__1 = jw - 1; i__2 = *ldt + 1; i__3 = *ldh + 1; dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3); /* ==== Accumulate orthogonal matrix in order update . H and Z, if requested. (A modified version . of DORGHR that accumulates block Householder . transformations into V directly might be . marginally more efficient than the following.) ==== */ if (*ns > 1 && s != 0.) { i__1 = *lwork - jw; dorghr_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); dgemm_("N", "N", &jw, ns, ns, &c_b15, &v[v_offset], ldv, &t[ t_offset], ldt, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &jw, ns, &wv[wv_offset], ldwv, &v[v_offset], ldv); } /* ==== Update vertical slab in H ==== */ if (*wantt) { ltop = 1; } else { ltop = *ktop; } i__1 = kwtop - 1; i__2 = *nv; for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = kwtop - krow; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh); /* L70: */ } /* ==== Update horizontal slab in H ==== */ if (*wantt) { i__2 = *n; i__1 = *nh; for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2; kcol += i__1) { /* Computing MIN */ i__3 = *nh, i__4 = *n - kcol + 1; kln = min(i__3,i__4); dgemm_("C", "N", &jw, &kln, &jw, &c_b15, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &c_b29, &t[t_offset], ldt); dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh); /* L80: */ } } /* ==== Update vertical slab in Z ==== */ if (*wantz) { i__1 = *ihiz; i__2 = *nv; for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = *ihiz - krow + 1; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &c_b29, &wv[ wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz); /* L90: */ } } } /* ==== Return the number of deflations ... ==== */ *nd = jw - *ns; /* ==== ... and the number of shifts. (Subtracting . INFQR from the spike length takes care . of the case of a rare QR failure while . calculating eigenvalues of the deflation . window.) ==== */ *ns -= infqr; /* ==== Return optimal workspace. 
==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR2 ==== */ return 0; } /* dlaqr2_ */ /* Subroutine */ int dlaqr3_(logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) { /* System generated locals */ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta; static integer kend, kcol, info, nmin, ifst, ilst, ltop, krow, i__, j, k; static doublereal s; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dgemm_(char *, char *, integer *, integer * , integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical bulge; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer infqr, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr4_( logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static doublereal aa, bb, cc; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal dd, cs; extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer jw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dtrexc_(char *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static logical sorted; static doublereal smlnum; static integer lwkopt; static doublereal evi, evk, foo; static integer kln; static doublereal tau, ulp; static integer lwk1, lwk2, lwk3; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 ****************************************************************** Aggressive early deflation: This subroutine accepts as input an upper Hessenberg matrix H and performs an orthogonal similarity transformation designed to detect and deflate fully converged eigenvalues from a trailing principal submatrix. 
On output H has been overwritten by a new Hessenberg matrix that is a perturbation of an orthogonal similarity transformation of H. It is to be hoped that the final version of H has many zero subdiagonal entries. ****************************************************************** WANTT (input) LOGICAL If .TRUE., then the Hessenberg matrix H is fully updated so that the quasi-triangular Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then only enough of H is updated to preserve the eigenvalues. WANTZ (input) LOGICAL If .TRUE., then the orthogonal matrix Z is updated so that the orthogonal Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then Z is not referenced. N (input) INTEGER The order of the matrix H and (if WANTZ is .TRUE.) the order of the orthogonal matrix Z. KTOP (input) INTEGER It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. KBOT (input) INTEGER It is assumed without a check that either KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. NW (input) INTEGER Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On input the initial N-by-N section of H stores the Hessenberg matrix undergoing aggressive early deflation. On output H has been transformed by an orthogonal similarity transformation, perturbed, and then returned to Hessenberg form that (it is to be hoped) has some zero subdiagonal entries. LDH (input) integer Leading dimension of H just as declared in the calling subroutine. N .LE. LDH ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .TRUE., then on output, the orthogonal similarity transformation mentioned above has been accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ is .FALSE., then Z is unreferenced. LDZ (input) integer The leading dimension of Z just as declared in the calling subroutine. 1 .LE. LDZ. NS (output) integer The number of unconverged (i.e. approximate) eigenvalues returned in SR and SI that may be used as shifts by the calling subroutine. ND (output) integer The number of converged eigenvalues uncovered by this subroutine. SR (output) DOUBLE PRECISION array, dimension KBOT SI (output) DOUBLE PRECISION array, dimension KBOT On output, the real and imaginary parts of approximate eigenvalues that may be used for shifts are stored in SR(KBOT-ND-NS+1) through SR(KBOT-ND) and SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. The real and imaginary parts of converged eigenvalues are stored in SR(KBOT-ND+1) through SR(KBOT) and SI(KBOT-ND+1) through SI(KBOT), respectively. V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) An NW-by-NW work array. LDV (input) integer scalar The leading dimension of V just as declared in the calling subroutine. NW .LE. LDV NH (input) integer scalar The number of columns of T. NH.GE.NW. T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) LDT (input) integer The leading dimension of T just as declared in the calling subroutine. NW .LE. LDT NV (input) integer The number of rows of work array WV available for workspace. NV.GE.NW.
WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) LDWV (input) integer The leading dimension of W just as declared in the calling subroutine. NW .LE. LDV WORK (workspace) DOUBLE PRECISION array, dimension LWORK. On exit, WORK(1) is set to an estimate of the optimal value of LWORK for the given values of N, NW, KTOP and KBOT. LWORK (input) integer The dimension of the work array WORK. LWORK = 2*NW suffices, but greater efficiency may result from larger values of LWORK. If LWORK = -1, then a workspace query is assumed; DLAQR3 only estimates the optimal workspace size for the given values of N, NW, KTOP and KBOT. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================== ==== Estimate optimal workspace. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --sr; --si; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; --work; /* Function Body */ /* Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); if (jw <= 2) { lwkopt = 1; } else { /* ==== Workspace query call to DGEHRD ==== */ i__1 = jw - 1; dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk1 = (integer) work[1]; /* ==== Workspace query call to DORGHR ==== */ i__1 = jw - 1; dorghr_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk2 = (integer) work[1]; /* ==== Workspace query call to DLAQR4 ==== */ dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[1], &si[1], &c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, & infqr); lwk3 = (integer) work[1]; /* ==== Optimal workspace ==== Computing MAX */ i__1 = jw + max(lwk1,lwk2); lwkopt = max(i__1,lwk3); } /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== Nothing to do ... ... for an empty active block ... ==== */ *ns = 0; *nd = 0; if (*ktop > *kbot) { return 0; } /* ... nor for an empty deflation window. ==== */ if (*nw < 1) { return 0; } /* ==== Machine constants ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Setup deflation window ==== Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); kwtop = *kbot - jw + 1; if (kwtop == *ktop) { s = 0.; } else { s = h__[kwtop + (kwtop - 1) * h_dim1]; } if (*kbot == kwtop) { /* ==== 1-by-1 deflation window: not much to do ==== */ sr[kwtop] = h__[kwtop + kwtop * h_dim1]; si[kwtop] = 0.; *ns = 1; *nd = 0; /* Computing MAX */ d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( d__1)); if (abs(s) <= max(d__2,d__3)) { *ns = 0; *nd = 1; if (kwtop > *ktop) { h__[kwtop + (kwtop - 1) * h_dim1] = 0.; } } return 0; } /* ==== Convert to spike-triangular form. (In case of a . rare QR failure, this routine continues to do . aggressive early deflation using that part of . the deflation window that converged using INFQR . here and there to keep track.) 
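.       (Editor's note, not part of the original LAPACK source: in
.       the deflation detection loop below, a trailing real
.       eigenvalue of T is treated as converged when its spike
.       entry is negligible, i.e. when
.       abs(s*v(1,ns)) .LE. max(smlnum, ulp*abs(t(ns,ns))), where
.       s = H(KWTOP,KWTOP-1) couples the deflation window to the
.       rest of H; a 2-by-2 block is deflated when both of its
.       spike entries pass the analogous test.)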
==== */ dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt); i__1 = jw - 1; i__2 = *ldh + 1; i__3 = *ldt + 1; dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3); dlaset_("A", &jw, &jw, &c_b29, &c_b15, &v[v_offset], ldv); nmin = ilaenv_(&c__12, "DLAQR3", "SV", &jw, &c__1, &jw, lwork, (ftnlen)6, (ftnlen)2); if (jw > nmin) { dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1], lwork, &infqr); } else { dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr); } /* ==== DTREXC needs a clean margin near the diagonal ==== */ i__1 = jw - 3; for (j = 1; j <= i__1; ++j) { t[j + 2 + j * t_dim1] = 0.; t[j + 3 + j * t_dim1] = 0.; /* L10: */ } if (jw > 2) { t[jw + (jw - 2) * t_dim1] = 0.; } /* ==== Deflation detection loop ==== */ *ns = jw; ilst = infqr + 1; L20: if (ilst <= *ns) { if (*ns == 1) { bulge = FALSE_; } else { bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; } /* ==== Small spike tip test for deflation ==== */ if (! bulge) { /* ==== Real eigenvalue ==== */ foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * foo; if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) { /* ==== Deflatable ==== */ --(*ns); } else { /* ==== Undeflatable. Move it up out of the way. . (DTREXC can not fail in this case.) ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ++ilst; } } else { /* ==== Complex conjugate pair ==== */ foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* ns - 1 + *ns * t_dim1], abs(d__2))); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); /* Computing MAX */ d__5 = smlnum, d__6 = ulp * foo; if (max(d__3,d__4) <= max(d__5,d__6)) { /* ==== Deflatable ==== */ *ns += -2; } else { /* ==== Undflatable. Move them up out of the way. . Fortunately, DTREXC does the right thing with . ILST in case of a rare exchange failure. ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ilst += 2; } } /* ==== End deflation detection loop ==== */ goto L20; } /* ==== Return to Hessenberg form ==== */ if (*ns == 0) { s = 0.; } if (*ns < jw) { /* ==== sorting diagonal blocks of T improves accuracy for . graded matrices. Bubble sort deals well with . exchange failures. ==== */ sorted = FALSE_; i__ = *ns + 1; L30: if (sorted) { goto L50; } sorted = TRUE_; kend = i__ - 1; i__ = infqr + 1; if (i__ == *ns) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } L40: if (k <= kend) { if (k == i__ + 1) { evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); } else { evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = t[i__ + (i__ + 1) * t_dim1], abs(d__2))); } if (k == kend) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else if (t[k + 1 + k * t_dim1] == 0.) 
{ evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else { evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + (k + 1) * t_dim1], abs(d__2))); } if (evi >= evk) { i__ = k; } else { sorted = FALSE_; ifst = i__; ilst = k; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); if (info == 0) { i__ = ilst; } else { i__ = k; } } if (i__ == kend) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } goto L40; } goto L30; L50: ; } /* ==== Restore shift/eigenvalue array from T ==== */ i__ = jw; L60: if (i__ >= infqr + 1) { if (i__ == infqr + 1) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else { aa = t[i__ - 1 + (i__ - 1) * t_dim1]; cc = t[i__ + (i__ - 1) * t_dim1]; bb = t[i__ - 1 + i__ * t_dim1]; dd = t[i__ + i__ * t_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & sn); i__ += -2; } goto L60; } if (*ns < jw || s == 0.) { if (*ns > 1 && s != 0.) { /* ==== Reflect spike back into lower triangle ==== */ dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1); beta = work[1]; dlarfg_(ns, &beta, &work[2], &c__1, &tau); work[1] = 1.; i__1 = jw - 2; i__2 = jw - 2; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &t[t_dim1 + 3], ldt); dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, & work[jw + 1]); i__1 = *lwork - jw; dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); } /* ==== Copy updated reduced window into place ==== */ if (kwtop > 1) { h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; } dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1] , ldh); i__1 = jw - 1; i__2 = *ldt + 1; i__3 = *ldh + 1; dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3); /* ==== Accumulate orthogonal matrix in order update . H and Z, if requested. (A modified version . of DORGHR that accumulates block Householder . transformations into V directly might be . marginally more efficient than the following.) ==== */ if (*ns > 1 && s != 0.) { i__1 = *lwork - jw; dorghr_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); dgemm_("N", "N", &jw, ns, ns, &c_b15, &v[v_offset], ldv, &t[ t_offset], ldt, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &jw, ns, &wv[wv_offset], ldwv, &v[v_offset], ldv); } /* ==== Update vertical slab in H ==== */ if (*wantt) { ltop = 1; } else { ltop = *ktop; } i__1 = kwtop - 1; i__2 = *nv; for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = kwtop - krow; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh); /* L70: */ } /* ==== Update horizontal slab in H ==== */ if (*wantt) { i__2 = *n; i__1 = *nh; for (kcol = *kbot + 1; i__1 < 0 ? 
kcol >= i__2 : kcol <= i__2; kcol += i__1) { /* Computing MIN */ i__3 = *nh, i__4 = *n - kcol + 1; kln = min(i__3,i__4); dgemm_("C", "N", &jw, &kln, &jw, &c_b15, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &c_b29, &t[t_offset], ldt); dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh); /* L80: */ } } /* ==== Update vertical slab in Z ==== */ if (*wantz) { i__1 = *ihiz; i__2 = *nv; for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = *ihiz - krow + 1; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &c_b29, &wv[ wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz); /* L90: */ } } } /* ==== Return the number of deflations ... ==== */ *nd = jw - *ns; /* ==== ... and the number of shifts. (Subtracting . INFQR from the spike length takes care . of the case of a rare QR failure while . calculating eigenvalues of the deflation . window.) ==== */ *ns -= infqr; /* ==== Return optimal workspace. ==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR3 ==== */ return 0; } /* dlaqr3_ */ /* Subroutine */ int dlaqr4_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static integer ndfl, kbot, nmin; static doublereal swap; static integer ktop; static doublereal zdum[1] /* was [1][1] */; static integer kacc22, i__, k; static logical nwinc; static integer itmax, nsmax, nwmax, kwtop; extern /* Subroutine */ int dlaqr2_(logical *, logical *, integer *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr5_( logical *, logical *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *); static doublereal aa, bb, cc, dd; static integer ld; static doublereal cs; static integer nh, nibble, it, ks, kt; static doublereal sn; static integer ku, kv, ls, ns; static doublereal ss; static integer nw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static char jbcmpz[2]; static logical sorted; static integer lwkopt, inf, kdu, nho, nve, kwh, nsr, nwr, kwv; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 This subroutine implements one level of recursion for DLAQR0. It is a complete implementation of the small bulge multi-shift QR algorithm. It may be called by DLAQR0 and, for large enough deflation window size, it may be called by DLAQR3. This subroutine is identical to DLAQR0 except that it calls DLAQR2 instead of DLAQR3. Purpose ======= DLAQR4 computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N .GE. 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, H(ILO,ILO-1) is zero. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise, ILO and IHI should be set to 1 and N, respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO = 0 and WANTT is .TRUE., then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is .FALSE., then the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) This subroutine may explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (IHI) WI (output) DOUBLE PRECISION array, dimension (IHI) The real and imaginary parts, respectively, of the computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored in WR(ILO:IHI) and WI(ILO:IHI). If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .FALSE., then Z is not referenced. If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal Schur factor of H(ILO:IHI,ILO:IHI). (The output value of Z when INFO.GT.0 is given under the description of INFO below.)
LDZ (input) INTEGER The leading dimension of the array Z. if WANTZ is .TRUE. then LDZ.GE.MAX(1,IHIZ). Otherwise, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension LWORK On exit, if LWORK = -1, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DLAQR4 does a workspace query. In this case, DLAQR4 checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z is accessed. INFO (output) INTEGER = 0: successful exit .GT. 0: if INFO = i, DLAQR4 failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 0 and WANTT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z(ILO:IHI,ILOZ:IHIZ)) = (initial value of Z(ILO:IHI,ILOZ:IHIZ))*U where U is the orthogonal matrix in (*) (regardless of the value of WANTT.) If INFO .GT. 0 and WANTZ is .FALSE., then Z is not accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== Exceptional deflation windows: try to cure rare . slow convergence by increasing the size of the . deflation window after KEXNW iterations. ==== ==== Exceptional shifts: try to cure rare slow convergence . with ad-hoc exceptional shifts every KEXSH iterations. . The constants WILK1 and WILK2 are used to form the . exceptional shifts. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; /* ==== Quick return for N = 0: nothing to do. ==== */ if (*n == 0) { work[1] = 1.; return 0; } /* ==== Set up job flags for ILAENV. ==== */ if (*wantt) { *(unsigned char *)jbcmpz = 'S'; } else { *(unsigned char *)jbcmpz = 'E'; } if (*wantz) { *(unsigned char *)&jbcmpz[1] = 'V'; } else { *(unsigned char *)&jbcmpz[1] = 'N'; } /* ==== Tiny matrices must use DLAHQR.
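.          (Editor's note, not part of the original LAPACK source:
.          NTINY = 11 in this translation, so the test below
.          compares N directly against the literal 11 rather than
.          against a named constant.)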
==== */ if (*n <= 11) { /* ==== Estimate optimal workspace. ==== */ lwkopt = 1; if (*lwork != -1) { dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], & wi[1], iloz, ihiz, &z__[z_offset], ldz, info); } } else { /* ==== Use small bulge multi-shift QR with aggressive early . deflation on larger-than-tiny matrices. ==== ==== Hope for the best. ==== */ *info = 0; /* ==== NWR = recommended deflation window size. At this . point, N .GT. NTINY = 11, so there is enough . subdiagonal workspace for NWR.GE.2 as required. . (In fact, there is enough subdiagonal space for . NWR.GE.3.) ==== */ nwr = ilaenv_(&c__13, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nwr = max(2,nwr); /* Computing MIN */ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2); nwr = min(i__1,nwr); nw = nwr; /* ==== NSR = recommended number of simultaneous shifts. . At this point N .GT. NTINY = 11, so there is at . enough subdiagonal workspace for NSR to be even . and greater than or equal to two as required. ==== */ nsr = ilaenv_(&c__15, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); /* Computing MIN */ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi - *ilo; nsr = min(i__1,i__2); /* Computing MAX */ i__1 = 2, i__2 = nsr - nsr % 2; nsr = max(i__1,i__2); /* ==== Estimate optimal workspace ==== ==== Workspace query call to DLAQR2 ==== */ i__1 = nwr + 1; dlaqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1], &c_n1); /* ==== Optimal workspace = MAX(DLAQR5, DLAQR2) ==== Computing MAX */ i__1 = nsr * 3 / 2, i__2 = (integer) work[1]; lwkopt = max(i__1,i__2); /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== */ nmin = ilaenv_(&c__12, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen) 6, (ftnlen)2); nmin = max(11,nmin); /* ==== Nibble crossover point ==== */ nibble = ilaenv_(&c__14, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); nibble = max(0,nibble); /* ==== Accumulate reflections during ttswp? Use block . 2-by-2 structure during matrix-matrix multiply? ==== */ kacc22 = ilaenv_(&c__16, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); kacc22 = max(0,kacc22); kacc22 = min(2,kacc22); /* ==== NWMAX = the largest possible deflation window for . which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n - 1) / 3, i__2 = *lwork / 2; nwmax = min(i__1,i__2); /* ==== NSMAX = the Largest number of simultaneous shifts . for which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3; nsmax = min(i__1,i__2); nsmax -= nsmax % 2; /* ==== NDFL: an iteration count restarted at deflation. ==== */ ndfl = 1; /* ==== ITMAX = iteration limit ==== Computing MAX */ i__1 = 10, i__2 = *ihi - *ilo + 1; itmax = 30 * max(i__1,i__2); /* ==== Last row and column in the active block ==== */ kbot = *ihi; /* ==== Main Loop ==== */ i__1 = itmax; for (it = 1; it <= i__1; ++it) { /* ==== Done when KBOT falls below ILO ==== */ if (kbot < *ilo) { goto L90; } /* ==== Locate active block ==== */ i__2 = *ilo + 1; for (k = kbot; k >= i__2; --k) { if (h__[k + (k - 1) * h_dim1] == 0.) { goto L20; } /* L10: */ } k = *ilo; L20: ktop = k; /* ==== Select deflation window size ==== */ nh = kbot - ktop + 1; if (ndfl < 5 || nh < nw) { /* ==== Typical deflation window. 
If possible and . advisable, nibble the entire active block. . If not, use size NWR or NWR+1 depending upon . which has the smaller corresponding subdiagonal . entry (a heuristic). ==== */ nwinc = TRUE_; if (nh <= min(nmin,nwmax)) { nw = nh; } else { /* Computing MIN */ i__2 = min(nwr,nh); nw = min(i__2,nwmax); if (nw < nwmax) { if (nw >= nh - 1) { nw = nh; } else { kwtop = kbot - nw + 1; if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1)) > (d__2 = h__[kwtop - 1 + ( kwtop - 2) * h_dim1], abs(d__2))) { ++nw; } } } } } else { /* ==== Exceptional deflation window. If there have . been no deflations in KEXNW or more iterations, . then vary the deflation window size. At first, . because, larger windows are, in general, more . powerful than smaller ones, rapidly increase the . window up to the maximum reasonable and possible. . Then maybe try a slightly smaller window. ==== */ if (nwinc && nw < min(nwmax,nh)) { /* Computing MIN */ i__2 = min(nwmax,nh), i__3 = nw << 1; nw = min(i__2,i__3); } else { nwinc = FALSE_; if (nw == nh && nh > 2) { nw = nh - 1; } } } /* ==== Aggressive early deflation: . split workspace under the subdiagonal into . - an nw-by-nw work array V in the lower . left-hand-corner, . - an NW-by-at-least-NW-but-more-is-better . (NW-by-NHO) horizontal work array along . the bottom edge, . - an at-least-NW-but-more-is-better (NHV-by-NW) . vertical work array along the left-hand-edge. . ==== */ kv = *n - nw + 1; kt = nw + 1; nho = *n - nw - 1 - kt + 1; kwv = nw + 2; nve = *n - nw - kwv + 1; /* ==== Aggressive early deflation ==== */ dlaqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork); /* ==== Adjust KBOT accounting for new deflations. ==== */ kbot -= ld; /* ==== KS points to the shifts. ==== */ ks = kbot - ls + 1; /* ==== Skip an expensive QR sweep if there is a (partly . heuristic) reason to expect that many eigenvalues . will deflate without it. Here, the QR sweep is . skipped if many eigenvalues have just been deflated . or if the remaining active block is small. */ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min( nmin,nwmax)) { /* ==== NS = nominal number of simultaneous shifts. . This may be lowered (slightly) if DLAQR2 . did not provide that many shifts. ==== Computing MIN Computing MAX */ i__4 = 2, i__5 = kbot - ktop; i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5); ns = min(i__2,i__3); ns -= ns % 2; /* ==== If there have been no deflations . in a multiple of KEXSH iterations, . then try exceptional shifts. . Otherwise use shifts provided by . DLAQR2 above or from the eigenvalues . of a trailing principal submatrix. ==== */ if (ndfl % 6 == 0) { ks = kbot - ns + 1; /* Computing MAX */ i__3 = ks + 1, i__4 = ktop + 2; i__2 = max(i__3,i__4); for (i__ = kbot; i__ >= i__2; i__ += -2) { ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2)); aa = ss * .75 + h__[i__ + i__ * h_dim1]; bb = ss; cc = ss * -.4375; dd = aa; dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1] , &wr[i__], &wi[i__], &cs, &sn); /* L30: */ } if (ks == ktop) { wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1]; wi[ks + 1] = 0.; wr[ks] = wr[ks + 1]; wi[ks] = wi[ks + 1]; } } else { /* ==== Got NS/2 or fewer shifts? Use DLAHQR . on a trailing principal submatrix to . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, . there is enough space below the subdiagonal . 
to fit an NS-by-NS scratch array.) ==== */ if (kbot - ks + 1 <= ns / 2) { ks = kbot - ns + 1; kt = *n - ns + 1; dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, & h__[kt + h_dim1], ldh); dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt + h_dim1], ldh, &wr[ks], &wi[ks], &c__1, & c__1, zdum, &c__1, &inf); ks += inf; /* ==== In case of a rare QR failure use . eigenvalues of the trailing 2-by-2 . principal submatrix. ==== */ if (ks >= kbot) { aa = h__[kbot - 1 + (kbot - 1) * h_dim1]; cc = h__[kbot + (kbot - 1) * h_dim1]; bb = h__[kbot - 1 + kbot * h_dim1]; dd = h__[kbot + kbot * h_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn) ; ks = kbot - 1; } } if (kbot - ks + 1 > ns) { /* ==== Sort the shifts (Helps a little) . Bubble sort keeps complex conjugate . pairs together. ==== */ sorted = FALSE_; i__2 = ks + 1; for (k = kbot; k >= i__2; --k) { if (sorted) { goto L60; } sorted = TRUE_; i__3 = k - 1; for (i__ = ks; i__ <= i__3; ++i__) { if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[ i__], abs(d__2)) < (d__3 = wr[i__ + 1] , abs(d__3)) + (d__4 = wi[i__ + 1], abs(d__4))) { sorted = FALSE_; swap = wr[i__]; wr[i__] = wr[i__ + 1]; wr[i__ + 1] = swap; swap = wi[i__]; wi[i__] = wi[i__ + 1]; wi[i__ + 1] = swap; } /* L40: */ } /* L50: */ } L60: ; } /* ==== Shuffle shifts into pairs of real shifts . and pairs of complex conjugate shifts . assuming complex conjugate shifts are . already adjacent to one another. (Yes, . they are.) ==== */ i__2 = ks + 2; for (i__ = kbot; i__ >= i__2; i__ += -2) { if (wi[i__] != -wi[i__ - 1]) { swap = wr[i__]; wr[i__] = wr[i__ - 1]; wr[i__ - 1] = wr[i__ - 2]; wr[i__ - 2] = swap; swap = wi[i__]; wi[i__] = wi[i__ - 1]; wi[i__ - 1] = wi[i__ - 2]; wi[i__ - 2] = swap; } /* L70: */ } } /* ==== If there are only two shifts and both are . real, then use only one. ==== */ if (kbot - ks + 1 == 2) { if (wi[kbot] == 0.) { if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs( d__1)) < (d__2 = wr[kbot - 1] - h__[kbot + kbot * h_dim1], abs(d__2))) { wr[kbot - 1] = wr[kbot]; } else { wr[kbot] = wr[kbot - 1]; } } } /* ==== Use up to NS of the the smallest magnatiude . shifts. If there aren't NS shifts available, . then use them all, possibly dropping one to . make the number of shifts even. ==== Computing MIN */ i__2 = ns, i__3 = kbot - ks + 1; ns = min(i__2,i__3); ns -= ns % 2; ks = kbot - ns + 1; /* ==== Small-bulge multi-shift QR sweep: . split workspace under the subdiagonal into . - a KDU-by-KDU work array U in the lower . left-hand-corner, . - a KDU-by-at-least-KDU-but-more-is-better . (KDU-by-NHo) horizontal work array WH along . the bottom edge, . - and an at-least-KDU-but-more-is-better-by-KDU . (NVE-by-KDU) vertical work WV arrow along . the left-hand-edge. ==== */ kdu = ns * 3 - 3; ku = *n - kdu + 1; kwh = kdu + 1; nho = *n - kdu - 3 - (kdu + 1) + 1; kwv = kdu + 4; nve = *n - kdu - kwv + 1; /* ==== Small-bulge multi-shift QR sweep ==== */ dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks], &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1], ldh); } /* ==== Note progress (or the lack of it). ==== */ if (ld > 0) { ndfl = 1; } else { ++ndfl; } /* ==== End of main loop ==== L80: */ } /* ==== Iteration limit exceeded. Set INFO to show where . the problem occurred and exit. ==== */ *info = kbot; L90: ; } /* ==== Return the optimal value of LWORK. 
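. A minimal calling sketch of the two-call workspace query . described in the LWORK notes above (illustrative variable . names, error handling omitted; assumes the f2c-style prototype . whose parameters appear in the body above): . . integer lwork = -1, info = 0; . doublereal wkopt; . dlaqr4_(wantt, wantz, &n, &ilo, &ihi, h, &ldh, wr, wi, . &iloz, &ihiz, z, &ldz, &wkopt, &lwork, &info); . lwork = (integer) wkopt; . doublereal *work = malloc(lwork * sizeof(doublereal)); . dlaqr4_(wantt, wantz, &n, &ilo, &ihi, h, &ldh, wr, wi, . &iloz, &ihiz, z, &ldz, work, &lwork, &info);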
==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR4 ==== */ return 0; } /* dlaqr4_ */ /* Subroutine */ int dlaqr5_(logical *wantt, logical *wantz, integer *kacc22, integer *n, integer *ktop, integer *kbot, integer *nshfts, doublereal *sr, doublereal *si, doublereal *h__, integer *ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *v, integer * ldv, doublereal *u, integer *ldu, integer *nv, doublereal *wv, integer *ldwv, integer *nh, doublereal *wh, integer *ldwh) { /* System generated locals */ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1, wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static doublereal beta; static logical blk22, bmp22; static integer mend, jcol, jlen, jbot, mbot; static doublereal swap; static integer jtop, jrow, mtop, i__, j, k, m; static doublereal alpha; static logical accum; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ndcol, incol, krcol, nbmps; extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer i2, j2, i4, j4, k1; extern /* Subroutine */ int dlaqr1_(integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(doublereal *, doublereal *); static doublereal h11, h12, h21, h22; static integer m22; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static integer ns, nu; static doublereal vt[3]; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal refsum; static integer mstart; static doublereal smlnum, scl; static integer kdu, kms; static doublereal ulp; static integer knz, kzs; static doublereal tst1, tst2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 This auxiliary subroutine called by DLAQR0 performs a single small-bulge multi-shift QR sweep. WANTT (input) logical scalar WANTT = .true. if the quasi-triangular Schur factor is being computed. WANTT is set to .false. otherwise. WANTZ (input) logical scalar WANTZ = .true. if the orthogonal Schur factor is being computed. WANTZ is set to .false. otherwise. KACC22 (input) integer with value 0, 1, or 2. Specifies the computation mode of far-from-diagonal orthogonal updates. = 0: DLAQR5 does not accumulate reflections and does not use matrix-matrix multiply to update far-from-diagonal matrix entries. = 1: DLAQR5 accumulates reflections and uses matrix-matrix multiply to update the far-from-diagonal matrix entries. = 2: DLAQR5 accumulates reflections, uses matrix-matrix multiply to update the far-from-diagonal matrix entries, and takes advantage of 2-by-2 block structure during matrix multiplies. N (input) integer scalar N is the order of the Hessenberg matrix H upon which this subroutine operates. KTOP (input) integer scalar KBOT (input) integer scalar These are the first and last rows and columns of an isolated diagonal block upon which the QR sweep is to be applied. 
It is assumed without a check that either KTOP = 1 or H(KTOP,KTOP-1) = 0 and either KBOT = N or H(KBOT+1,KBOT) = 0. NSHFTS (input) integer scalar NSHFTS gives the number of simultaneous shifts. NSHFTS must be positive and even. SR (input) DOUBLE PRECISION array of size (NSHFTS) SI (input) DOUBLE PRECISION array of size (NSHFTS) SR contains the real parts and SI contains the imaginary parts of the NSHFTS shifts of origin that define the multi-shift QR sweep. H (input/output) DOUBLE PRECISION array of size (LDH,N) On input H contains a Hessenberg matrix. On output a multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied to the isolated diagonal block in rows and columns KTOP through KBOT. LDH (input) integer scalar LDH is the leading dimension of H just as declared in the calling procedure. LDH.GE.MAX(1,N). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N Z (input/output) DOUBLE PRECISION array of size (LDZ,IHI) If WANTZ = .TRUE., then the QR sweep orthogonal similarity transformation is accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ = .FALSE., then Z is unreferenced. LDZ (input) integer scalar LDZ is the leading dimension of Z just as declared in the calling procedure. LDZ.GE.N. V (workspace) DOUBLE PRECISION array of size (LDV,NSHFTS/2) LDV (input) integer scalar LDV is the leading dimension of V as declared in the calling procedure. LDV.GE.3. U (workspace) DOUBLE PRECISION array of size (LDU,3*NSHFTS-3) LDU (input) integer scalar LDU is the leading dimension of U just as declared in the calling subroutine. LDU.GE.3*NSHFTS-3. NH (input) integer scalar NH is the number of columns in array WH available for workspace. NH.GE.1. WH (workspace) DOUBLE PRECISION array of size (LDWH,NH) LDWH (input) integer scalar Leading dimension of WH just as declared in the calling procedure. LDWH.GE.3*NSHFTS-3. NV (input) integer scalar NV is the number of rows in WV available for workspace. NV.GE.1. WV (workspace) DOUBLE PRECISION array of size (LDWV,3*NSHFTS-3) LDWV (input) integer scalar LDWV is the leading dimension of WV as declared in the calling subroutine. LDWV.GE.NV. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ============================================================ Reference: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. ============================================================ ==== If there are no shifts, then there is nothing to do. ==== */ /* Parameter adjustments */ --sr; --si; h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; wh_dim1 = *ldwh; wh_offset = 1 + wh_dim1 * 1; wh -= wh_offset; /* Function Body */ if (*nshfts < 2) { return 0; } /* ==== If the active block is empty or 1-by-1, then there . is nothing to do. ==== */ if (*ktop >= *kbot) { return 0; } /* ==== Shuffle shifts into pairs of real shifts and pairs . of complex conjugate shifts assuming complex . conjugate shifts are already adjacent to one . another.
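. For example (illustrative values): with NSHFTS = 3 and shifts . (SR,SI) = (5,0), (2,3), (2,-3), the loop below finds that shift . 1 is real while shift 2 is not its conjugate and rotates the . triple to (2,3), (2,-3), (5,0); the leftover odd real shift is . then dropped by the NS = NSHFTS - MOD(NSHFTS,2) step that . follows.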
==== */ i__1 = *nshfts - 2; for (i__ = 1; i__ <= i__1; i__ += 2) { if (si[i__] != -si[i__ + 1]) { swap = sr[i__]; sr[i__] = sr[i__ + 1]; sr[i__ + 1] = sr[i__ + 2]; sr[i__ + 2] = swap; swap = si[i__]; si[i__] = si[i__ + 1]; si[i__ + 1] = si[i__ + 2]; si[i__ + 2] = swap; } /* L10: */ } /* ==== NSHFTS is supposed to be even, but if is odd, . then simply reduce it by one. The shuffle above . ensures that the dropped shift is real and that . the remaining shifts are paired. ==== */ ns = *nshfts - *nshfts % 2; /* ==== Machine constants for deflation ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Use accumulated reflections to update far-from-diagonal . entries ? ==== */ accum = *kacc22 == 1 || *kacc22 == 2; /* ==== If so, exploit the 2-by-2 block structure? ==== */ blk22 = ns > 2 && *kacc22 == 2; /* ==== clear trash ==== */ if (*ktop + 2 <= *kbot) { h__[*ktop + 2 + *ktop * h_dim1] = 0.; } /* ==== NBMPS = number of 2-shift bulges in the chain ==== */ nbmps = ns / 2; /* ==== KDU = width of slab ==== */ kdu = nbmps * 6 - 3; /* ==== Create and chase chains of NBMPS bulges ==== */ i__1 = *kbot - 2; i__2 = nbmps * 3 - 2; for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 : incol <= i__1; incol += i__2) { ndcol = incol + kdu; if (accum) { dlaset_("ALL", &kdu, &kdu, &c_b29, &c_b15, &u[u_offset], ldu); } /* ==== Near-the-diagonal bulge chase. The following loop . performs the near-the-diagonal part of a small bulge . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal . chunk extends from column INCOL to column NDCOL . (including both column INCOL and column NDCOL). The . following loop chases a 3*NBMPS column long chain of . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL . may be less than KTOP and and NDCOL may be greater than . KBOT indicating phantom columns from which to chase . bulges before they are actually introduced or to which . to chase bulges beyond column KBOT.) ==== Computing MIN */ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2; i__3 = min(i__4,i__5); for (krcol = incol; krcol <= i__3; ++krcol) { /* ==== Bulges number MTOP to MBOT are active double implicit . shift bulges. There may or may not also be small . 2-by-2 bulge, if there is room. The inactive bulges . (if any) must wait until the active bulges have moved . down the diagonal to make room. The phantom matrix . paradigm described above helps keep track. ==== Computing MAX */ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1; mtop = max(i__4,i__5); /* Computing MIN */ i__4 = nbmps, i__5 = (*kbot - krcol) / 3; mbot = min(i__4,i__5); m22 = mbot + 1; bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2; /* ==== Generate reflections to chase the chain right . one column. (The minimum value of K is KTOP-1.) ==== */ i__4 = mbot; for (m = mtop; m <= i__4; ++m) { k = krcol + (m - 1) * 3; if (k == *ktop - 1) { dlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m * 2], &v[m * v_dim1 + 1]); alpha = v[m * v_dim1 + 1]; dlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m * v_dim1 + 1]); } else { beta = h__[k + 1 + k * h_dim1]; v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1]; v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1]; dlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m * v_dim1 + 1]); /* ==== A Bulge may collapse because of vigilant . deflation or destructive underflow. (The . initial bulge is always collapsed.) Use . the two-small-subdiagonals trick to try . 
to get it started again. If V(2,M).NE.0 and . V(3,M) = H(K+3,K+1) = H(K+3,K+2) = 0, then . this bulge is collapsing into a zero . subdiagonal. It will be restarted next . trip through the loop.) */ if (v[m * v_dim1 + 1] != 0. && (v[m * v_dim1 + 3] != 0. || h__[k + 3 + (k + 1) * h_dim1] == 0. && h__[k + 3 + (k + 2) * h_dim1] == 0.)) { /* ==== Typical case: not collapsed (yet). ==== */ h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; } else { /* ==== Atypical case: collapsed. Attempt to . reintroduce ignoring H(K+1,K). If the . fill resulting from the new reflector . is too large, then abandon it. . Otherwise, use the new one. ==== */ dlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, & sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m * 2], vt); scl = abs(vt[0]) + abs(vt[1]) + abs(vt[2]); if (scl != 0.) { vt[0] /= scl; vt[1] /= scl; vt[2] /= scl; } /* ==== The following is the traditional and . conservative two-small-subdiagonals . test. ==== . */ if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) * ( abs(vt[1]) + abs(vt[2])) > ulp * abs(vt[0]) * ((d__2 = h__[k + k * h_dim1], abs(d__2)) + ( d__3 = h__[k + 1 + (k + 1) * h_dim1], abs( d__3)) + (d__4 = h__[k + 2 + (k + 2) * h_dim1] , abs(d__4)))) { /* ==== Starting a new bulge here would . create non-negligible fill. If . the old reflector is diagonal (only . possible with underflows), then . change it to I. Otherwise, use . it with trepidation. ==== */ if (v[m * v_dim1 + 2] == 0. && v[m * v_dim1 + 3] == 0.) { v[m * v_dim1 + 1] = 0.; } else { h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; } } else { /* ==== Stating a new bulge here would . create only negligible fill. . Replace the old reflector with . the new one. ==== */ alpha = vt[0]; dlarfg_(&c__3, &alpha, &vt[1], &c__1, vt); refsum = h__[k + 1 + k * h_dim1] + h__[k + 2 + k * h_dim1] * vt[1] + h__[k + 3 + k * h_dim1] * vt[2]; h__[k + 1 + k * h_dim1] -= vt[0] * refsum; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; v[m * v_dim1 + 1] = vt[0]; v[m * v_dim1 + 2] = vt[1]; v[m * v_dim1 + 3] = vt[2]; } } } /* L20: */ } /* ==== Generate a 2-by-2 reflection, if needed. ==== */ k = krcol + (m22 - 1) * 3; if (bmp22) { if (k == *ktop - 1) { dlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &sr[( m22 << 1) - 1], &si[(m22 << 1) - 1], &sr[m22 * 2], &si[m22 * 2], &v[m22 * v_dim1 + 1]); beta = v[m22 * v_dim1 + 1]; dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 * v_dim1 + 1]); } else { beta = h__[k + 1 + k * h_dim1]; v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1]; dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 * v_dim1 + 1]); h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; } } else { /* ==== Initialize V(1,M22) here to avoid possible undefined . variable problems later. 
==== */ v[m22 * v_dim1 + 1] = 0.; } /* ==== Multiply H by reflections from the left ==== */ if (accum) { jbot = min(ndcol,*kbot); } else if (*wantt) { jbot = *n; } else { jbot = *kbot; } i__4 = jbot; for (j = max(*ktop,krcol); j <= i__4; ++j) { /* Computing MIN */ i__5 = mbot, i__6 = (j - krcol + 2) / 3; mend = min(i__5,i__6); i__5 = mend; for (m = mtop; m <= i__5; ++m) { k = krcol + (m - 1) * 3; refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[ m * v_dim1 + 2] * h__[k + 2 + j * h_dim1] + v[m * v_dim1 + 3] * h__[k + 3 + j * h_dim1]); h__[k + 1 + j * h_dim1] -= refsum; h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2]; h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L30: */ } /* L40: */ } if (bmp22) { k = krcol + (m22 - 1) * 3; /* Computing MAX */ i__4 = k + 1; i__5 = jbot; for (j = max(i__4,*ktop); j <= i__5; ++j) { refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]); h__[k + 1 + j * h_dim1] -= refsum; h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L50: */ } } /* ==== Multiply H by reflections from the right. . Delay filling in the last row until the . vigilant deflation check is complete. ==== */ if (accum) { jtop = max(*ktop,incol); } else if (*wantt) { jtop = 1; } else { jtop = *ktop; } i__5 = mbot; for (m = mtop; m <= i__5; ++m) { if (v[m * v_dim1 + 1] != 0.) { k = krcol + (m - 1) * 3; /* Computing MIN */ i__6 = *kbot, i__7 = k + 3; i__4 = min(i__6,i__7); for (j = jtop; j <= i__4; ++j) { refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1] + v[m * v_dim1 + 2] * h__[j + (k + 2) * h_dim1] + v[m * v_dim1 + 3] * h__[j + (k + 3) * h_dim1]); h__[j + (k + 1) * h_dim1] -= refsum; h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 + 2]; h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L60: */ } if (accum) { /* ==== Accumulate U. (If necessary, update Z later . with with an efficient matrix-matrix . multiply.) ==== */ kms = k - incol; /* Computing MAX */ i__4 = 1, i__6 = *ktop - incol; i__7 = kdu; for (j = max(i__4,i__6); j <= i__7; ++j) { refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1] + v[m * v_dim1 + 2] * u[j + (kms + 2) * u_dim1] + v[m * v_dim1 + 3] * u[j + (kms + 3) * u_dim1]); u[j + (kms + 1) * u_dim1] -= refsum; u[j + (kms + 2) * u_dim1] -= refsum * v[m * v_dim1 + 2]; u[j + (kms + 3) * u_dim1] -= refsum * v[m * v_dim1 + 3]; /* L70: */ } } else if (*wantz) { /* ==== U is not accumulated, so update Z . now by multiplying by reflections . from the right. ==== */ i__7 = *ihiz; for (j = *iloz; j <= i__7; ++j) { refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1] + v[m * v_dim1 + 2] * z__[j + (k + 2) * z_dim1] + v[m * v_dim1 + 3] * z__[ j + (k + 3) * z_dim1]); z__[j + (k + 1) * z_dim1] -= refsum; z__[j + (k + 2) * z_dim1] -= refsum * v[m * v_dim1 + 2]; z__[j + (k + 3) * z_dim1] -= refsum * v[m * v_dim1 + 3]; /* L80: */ } } } /* L90: */ } /* ==== Special case: 2-by-2 reflection (if needed) ==== */ k = krcol + (m22 - 1) * 3; if (bmp22 && v[m22 * v_dim1 + 1] != 0.) 
{ /* Computing MIN */ i__7 = *kbot, i__4 = k + 3; i__5 = min(i__7,i__4); for (j = jtop; j <= i__5; ++j) { refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1] + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1]) ; h__[j + (k + 1) * h_dim1] -= refsum; h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L100: */ } if (accum) { kms = k - incol; /* Computing MAX */ i__5 = 1, i__7 = *ktop - incol; i__4 = kdu; for (j = max(i__5,i__7); j <= i__4; ++j) { refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1] + v[m22 * v_dim1 + 2] * u[j + (kms + 2) * u_dim1]); u[j + (kms + 1) * u_dim1] -= refsum; u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L110: */ } } else if (*wantz) { i__4 = *ihiz; for (j = *iloz; j <= i__4; ++j) { refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1] + v[m22 * v_dim1 + 2] * z__[j + (k + 2) * z_dim1]); z__[j + (k + 1) * z_dim1] -= refsum; z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L120: */ } } } /* ==== Vigilant deflation check ==== */ mstart = mtop; if (krcol + (mstart - 1) * 3 < *ktop) { ++mstart; } mend = mbot; if (bmp22) { ++mend; } if (krcol == *kbot - 2) { ++mend; } i__4 = mend; for (m = mstart; m <= i__4; ++m) { /* Computing MIN */ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3; k = min(i__5,i__7); /* ==== The following convergence test requires that . the tradition small-compared-to-nearby-diagonals . criterion and the Ahues & Tisseur (LAWN 122, 1997) . criteria both be satisfied. The latter improves . accuracy in some examples. Falling back on an . alternate convergence criterion when TST1 or TST2 . is zero (as done here) is traditional but probably . unnecessary. ==== */ if (h__[k + 1 + k * h_dim1] != 0.) { tst1 = (d__1 = h__[k + k * h_dim1], abs(d__1)) + (d__2 = h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); if (tst1 == 0.) { if (k >= *ktop + 1) { tst1 += (d__1 = h__[k + (k - 1) * h_dim1], abs( d__1)); } if (k >= *ktop + 2) { tst1 += (d__1 = h__[k + (k - 2) * h_dim1], abs( d__1)); } if (k >= *ktop + 3) { tst1 += (d__1 = h__[k + (k - 3) * h_dim1], abs( d__1)); } if (k <= *kbot - 2) { tst1 += (d__1 = h__[k + 2 + (k + 1) * h_dim1], abs(d__1)); } if (k <= *kbot - 3) { tst1 += (d__1 = h__[k + 3 + (k + 1) * h_dim1], abs(d__1)); } if (k <= *kbot - 4) { tst1 += (d__1 = h__[k + 4 + (k + 1) * h_dim1], abs(d__1)); } } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * tst1; if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) <= max( d__2,d__3)) { /* Computing MAX */ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs( d__2)); h12 = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs( d__2)); h21 = min(d__3,d__4); /* Computing MAX */ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs( d__1)), d__4 = (d__2 = h__[k + k * h_dim1] - h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); h11 = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs( d__1)), d__4 = (d__2 = h__[k + k * h_dim1] - h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); h22 = min(d__3,d__4); scl = h11 + h12; tst2 = h22 * (h11 / scl); /* Computing MAX */ d__1 = smlnum, d__2 = ulp * tst2; if (tst2 == 0. || h21 * (h12 / scl) <= max(d__1,d__2)) { h__[k + 1 + k * h_dim1] = 0.; } } } /* L130: */ } /* ==== Fill in the last row of each bulge. 
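. (Before this step, row K+4 of each bulge is zero in columns . K+1 and K+2 and has a single nonzero H(K+4,K+3), so the . right-multiplication by the reflector I - tau*u*u' with . u = (1, V(2,M), V(3,M))' and tau stored in V(1,M) reduces to . the rank-1 update coded below, where . REFSUM = V(1,M)*V(3,M)*H(K+4,K+3).)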
==== Computing MIN */ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3; mend = min(i__4,i__5); i__4 = mend; for (m = mtop; m <= i__4; ++m) { k = krcol + (m - 1) * 3; refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] * h__[k + 4 + ( k + 3) * h_dim1]; h__[k + 4 + (k + 1) * h_dim1] = -refsum; h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2]; h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L140: */ } /* ==== End of near-the-diagonal bulge chase. ==== L150: */ } /* ==== Use U (if accumulated) to update far-from-diagonal . entries in H. If required, use U to update Z as . well. ==== */ if (accum) { if (*wantt) { jtop = 1; jbot = *n; } else { jtop = *ktop; jbot = *kbot; } if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) { /* ==== Updates not exploiting the 2-by-2 block . structure of U. K1 and NU keep track of . the location and size of U in the special . cases of introducing bulges and chasing . bulges off the bottom. In these special . cases and in case the number of shifts . is NS = 2, there is no 2-by-2 block . structure to exploit. ==== Computing MAX */ i__3 = 1, i__4 = *ktop - incol; k1 = max(i__3,i__4); /* Computing MAX */ i__3 = 0, i__4 = ndcol - *kbot; nu = kdu - max(i__3,i__4) - k1 + 1; /* ==== Horizontal Multiply ==== */ i__3 = jbot; i__4 = *nh; for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 : jcol <= i__3; jcol += i__4) { /* Computing MIN */ i__5 = *nh, i__7 = jbot - jcol + 1; jlen = min(i__5,i__7); dgemm_("C", "N", &nu, &jlen, &nu, &c_b15, &u[k1 + k1 * u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1], ldh, &c_b29, &wh[wh_offset], ldwh); dlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[ incol + k1 + jcol * h_dim1], ldh); /* L160: */ } /* ==== Vertical multiply ==== */ i__4 = max(*ktop,incol) - 1; i__3 = *nv; for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4; jrow += i__3) { /* Computing MIN */ i__5 = *nv, i__7 = max(*ktop,incol) - jrow; jlen = min(i__5,i__7); dgemm_("N", "N", &jlen, &nu, &nu, &c_b15, &h__[jrow + ( incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1], ldu, &c_b29, &wv[wv_offset], ldwv); dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[ jrow + (incol + k1) * h_dim1], ldh); /* L170: */ } /* ==== Z multiply (also vertical) ==== */ if (*wantz) { i__3 = *ihiz; i__4 = *nv; for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3; jrow += i__4) { /* Computing MIN */ i__5 = *nv, i__7 = *ihiz - jrow + 1; jlen = min(i__5,i__7); dgemm_("N", "N", &jlen, &nu, &nu, &c_b15, &z__[jrow + (incol + k1) * z_dim1], ldz, &u[k1 + k1 * u_dim1], ldu, &c_b29, &wv[wv_offset], ldwv); dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[ jrow + (incol + k1) * z_dim1], ldz) ; /* L180: */ } } } else { /* ==== Updates exploiting U's 2-by-2 block structure. . (I2, I4, J2, J4 are the last rows and columns . of the blocks.) ==== */ i2 = (kdu + 1) / 2; i4 = kdu; j2 = i4 - i2; j4 = kdu; /* ==== KZS and KNZ deal with the band of zeros . along the diagonal of one of the triangular . blocks. ==== */ kzs = j4 - j2 - (ns + 1); knz = ns + 1; /* ==== Horizontal multiply ==== */ i__4 = jbot; i__3 = *nh; for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 : jcol <= i__4; jcol += i__3) { /* Computing MIN */ i__5 = *nh, i__7 = jbot - jcol + 1; jlen = min(i__5,i__7); /* ==== Copy bottom of H to top+KZS of scratch ==== (The first KZS rows get multiplied by zero.) 
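. The calls below form WH = U'*H(slab) piecewise: TRMM handles . the triangular blocks of the accumulated U and GEMM the dense . blocks, so the known zero triangles of U are never multiplied; . the result is then copied back over the slab of H. This is . the KACC22 = 2 path, intended to save work relative to one . dense GEMM over the whole KDU-by-KDU update.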
==== */ dlacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol * h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh); /* ==== Multiply by U21' ==== */ dlaset_("ALL", &kzs, &jlen, &c_b29, &c_b29, &wh[wh_offset] , ldwh); dtrmm_("L", "U", "C", "N", &knz, &jlen, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1] , ldwh); /* ==== Multiply top of H by U11' ==== */ dgemm_("C", "N", &i2, &jlen, &j2, &c_b15, &u[u_offset], ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b15, &wh[wh_offset], ldwh); /* ==== Copy top of H bottom of WH ==== */ dlacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1] , ldh, &wh[i2 + 1 + wh_dim1], ldwh); /* ==== Multiply by U21' ==== */ dtrmm_("L", "L", "C", "N", &j2, &jlen, &c_b15, &u[(i2 + 1) * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("C", "N", &i__5, &jlen, &i__7, &c_b15, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 + jcol * h_dim1], ldh, &c_b15, &wh[i2 + 1 + wh_dim1] , ldwh); /* ==== Copy it back ==== */ dlacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[ incol + 1 + jcol * h_dim1], ldh); /* L190: */ } /* ==== Vertical multiply ==== */ i__3 = max(incol,*ktop) - 1; i__4 = *nv; for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3; jrow += i__4) { /* Computing MIN */ i__5 = *nv, i__7 = max(incol,*ktop) - jrow; jlen = min(i__5,i__7); /* ==== Copy right of H to scratch (the first KZS . columns get multiplied by zero) ==== */ dlacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) * h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ dlaset_("ALL", &jlen, &kzs, &c_b29, &c_b29, &wv[wv_offset] , ldwv); dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U11 ==== */ dgemm_("N", "N", &jlen, &i2, &j2, &c_b15, &h__[jrow + ( incol + 1) * h_dim1], ldh, &u[u_offset], ldu, & c_b15, &wv[wv_offset], ldwv) ; /* ==== Copy left of H to right of scratch ==== */ dlacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) * h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ i__5 = i4 - i2; dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b15, &u[(i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1] , ldwv); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b15, &h__[jrow + (incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &c_b15, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Copy it back ==== */ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[ jrow + (incol + 1) * h_dim1], ldh); /* L200: */ } /* ==== Multiply Z (also vertical) ==== */ if (*wantz) { i__4 = *ihiz; i__3 = *nv; for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4; jrow += i__3) { /* Computing MIN */ i__5 = *nv, i__7 = *ihiz - jrow + 1; jlen = min(i__5,i__7); /* ==== Copy right of Z to left of scratch (first . 
KZS columns get multiplied by zero) ==== */ dlacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 + j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U12 ==== */ dlaset_("ALL", &jlen, &kzs, &c_b29, &c_b29, &wv[ wv_offset], ldwv); dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U11 ==== */ dgemm_("N", "N", &jlen, &i2, &j2, &c_b15, &z__[jrow + (incol + 1) * z_dim1], ldz, &u[u_offset], ldu, &c_b15, &wv[wv_offset], ldwv); /* ==== Copy left of Z to right of scratch ==== */ dlacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) * z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ i__5 = i4 - i2; dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b15, &u[( i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b15, &z__[ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &c_b15, &wv[( i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Copy the result back to Z ==== */ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, & z__[jrow + (incol + 1) * z_dim1], ldz); /* L210: */ } } } } /* L220: */ } /* ==== End of DLAQR5 ==== */ return 0; } /* dlaqr5_ */ /* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v, integer *incv, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) { /* System generated locals */ integer c_dim1, c_offset; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARF applies a real elementary reflector H to a real m by n matrix C, from either the left or the right. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. Arguments ========= SIDE (input) CHARACTER*1 = 'L': form H * C = 'R': form C * H M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. V (input) DOUBLE PRECISION array, dimension (1 + (M-1)*abs(INCV)) if SIDE = 'L' or (1 + (N-1)*abs(INCV)) if SIDE = 'R' The vector v in the representation of H. V is not used if TAU = 0. INCV (input) INTEGER The increment between elements of v. INCV <> 0. TAU (input) DOUBLE PRECISION The value tau in the representation of H. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by the matrix H * C if SIDE = 'L', or C * H if SIDE = 'R'. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L' or (M) if SIDE = 'R' ===================================================================== */ /* Parameter adjustments */ --v; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ if (lsame_(side, "L")) { /* Form H * C */ if (*tau != 0.) 
{ /* w := C' * v */ dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], incv, &c_b29, &work[1], &c__1); /* C := C - v * w' */ d__1 = -(*tau); dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset], ldc); } } else { /* Form C * H */ if (*tau != 0.) { /* w := C * v */ dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], incv, &c_b29, &work[1], &c__1); /* C := C - w * v' */ d__1 = -(*tau); dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset], ldc); } } return 0; /* End of DLARF */ } /* dlarf_ */ /* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char * storev, integer *m, integer *n, integer *k, doublereal *v, integer * ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc, doublereal *work, integer *ldwork) { /* System generated locals */ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, work_offset, i__1, i__2; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static char transt[1]; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFB applies a real block reflector H or its transpose H' to a real m by n matrix C, from either the left or the right. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply H or H' from the Left = 'R': apply H or H' from the Right TRANS (input) CHARACTER*1 = 'N': apply H (No transpose) = 'T': apply H' (Transpose) DIRECT (input) CHARACTER*1 Indicates how H is formed from a product of elementary reflectors = 'F': H = H(1) H(2) . . . H(k) (Forward) = 'B': H = H(k) . . . H(2) H(1) (Backward) STOREV (input) CHARACTER*1 Indicates how the vectors which define the elementary reflectors are stored: = 'C': Columnwise = 'R': Rowwise M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. K (input) INTEGER The order of the matrix T (= the number of elementary reflectors whose product defines the block reflector). V (input) DOUBLE PRECISION array, dimension (LDV,K) if STOREV = 'C' (LDV,M) if STOREV = 'R' and SIDE = 'L' (LDV,N) if STOREV = 'R' and SIDE = 'R' The matrix V. See further details. LDV (input) INTEGER The leading dimension of the array V. If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); if STOREV = 'R', LDV >= K. T (input) DOUBLE PRECISION array, dimension (LDT,K) The triangular k by k matrix T in the representation of the block reflector. LDT (input) INTEGER The leading dimension of the array T. LDT >= K. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by H*C or H'*C or C*H or C*H'. LDC (input) INTEGER The leading dimension of the array C. LDA >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K) LDWORK (input) INTEGER The leading dimension of the array WORK. If SIDE = 'L', LDWORK >= max(1,N); if SIDE = 'R', LDWORK >= max(1,M). 
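A usage sketch (illustrative, with hypothetical variable names): to apply H' from the left for K reflectors stored columnwise, forward ('F'/'C'), pair this routine with DLARFT, using a K-by-K triangle T and an N-by-K workspace: dlarft_("F", "C", &m, &k, v, &ldv, tau, t, &ldt); dlarfb_("L", "T", "F", "C", &m, &n, &k, v, &ldv, t, &ldt, c, &ldc, work, &n);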
===================================================================== Quick return if possible */ /* Parameter adjustments */ v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; work_dim1 = *ldwork; work_offset = 1 + work_dim1 * 1; work -= work_offset; /* Function Body */ if (*m <= 0 || *n <= 0) { return 0; } if (lsame_(trans, "N")) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } if (lsame_(storev, "C")) { if (lsame_(direct, "F")) { /* Let V = ( V1 ) (first K rows) ( V2 ) where V1 is unit lower triangular. */ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) W := C1' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L10: */ } /* W := W * V1 */ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C2'*V2 */ i__1 = *m - *k; dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V * W' */ if (*m > *k) { /* C2 := C2 - V2 * W' */ i__1 = *m - *k; dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, &v[*k + 1 + v_dim1], ldv, &work[work_offset], ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); } /* W := W * V1' */ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L20: */ } /* L30: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V = (C1*V1 + C2*V2) (stored in WORK) W := C1 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &c__1); /* L40: */ } /* W := W * V1 */ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C2 * V2 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, k, &i__1, & c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + 1 + v_dim1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V' */ if (*n > *k) { /* C2 := C2 - W * V2' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, &work[work_offset], ldwork, &v[*k + 1 + v_dim1], ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc); } /* W := W * V1' */ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; /* L50: */ } /* L60: */ } } } else { /* Let V = ( V1 ) ( V2 ) (last K rows) where V2 is unit upper triangular. 
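In each of the branches below the product with H is never formed explicitly; every case computes W := C'*V (or C*V), then W := W*T' (or W*T), and finally C := C - V*W' (or C - W*V'), i.e. two triangular multiplies plus one rank-K GEMM update.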
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) W := C2' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L70: */ } /* W := W * V2 */ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C1'*V1 */ i__1 = *m - *k; dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V * W' */ if (*m > *k) { /* C1 := C1 - V1 * W' */ i__1 = *m - *k; dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, &v[v_offset], ldv, &work[work_offset], ldwork, & c_b15, &c__[c_offset], ldc) ; } /* W := W * V2' */ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); /* C2 := C2 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L80: */ } /* L90: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V = (C1*V1 + C2*V2) (stored in WORK) W := C2 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ j * work_dim1 + 1], &c__1); /* L100: */ } /* W := W * V2 */ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C1 * V1 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, k, &i__1, & c_b15, &c__[c_offset], ldc, &v[v_offset], ldv, & c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V' */ if (*n > *k) { /* C1 := C1 - W * V1' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, &work[work_offset], ldwork, &v[v_offset], ldv, & c_b15, &c__[c_offset], ldc) ; } /* W := W * V2' */ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); /* C2 := C2 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * work_dim1]; /* L110: */ } /* L120: */ } } } } else if (lsame_(storev, "R")) { if (lsame_(direct, "F")) { /* Let V = ( V1 V2 ) (V1: first K columns) where V1 is unit upper triangular. 
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) W := C1' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L130: */ } /* W := W * V1' */ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C2'*V2' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V' * W' */ if (*m > *k) { /* C2 := C2 - V2' * W' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ (*k + 1) * v_dim1 + 1], ldv, &work[work_offset], ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); } /* W := W * V1 */ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L140: */ } /* L150: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V' = (C1*V1' + C2*V2') (stored in WORK) W := C1 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &c__1); /* L160: */ } /* W := W * V1' */ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C2 * V2' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V */ if (*n > *k) { /* C2 := C2 - W * V2 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, &i__1, k, & c_b151, &work[work_offset], ldwork, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc); } /* W := W * V1 */ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; /* L170: */ } /* L180: */ } } } else { /* Let V = ( V1 V2 ) (V2: last K columns) where V2 is unit lower triangular. 
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) W := C2' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L190: */ } /* W := W * V2' */ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] , ldwork); if (*m > *k) { /* W := W + C1'*V1' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V' * W' */ if (*m > *k) { /* C1 := C1 - V1' * W' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ v_offset], ldv, &work[work_offset], ldwork, & c_b15, &c__[c_offset], ldc); } /* W := W * V2 */ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[ work_offset], ldwork); /* C2 := C2 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L200: */ } /* L210: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V' = (C1*V1' + C2*V2') (stored in WORK) W := C2 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ j * work_dim1 + 1], &c__1); /* L220: */ } /* W := W * V2' */ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] , ldwork); if (*n > *k) { /* W := W + C1 * V1' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V */ if (*n > *k) { /* C1 := C1 - W * V1 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, &i__1, k, & c_b151, &work[work_offset], ldwork, &v[v_offset], ldv, &c_b15, &c__[c_offset], ldc); } /* W := W * V2 */ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[ work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * work_dim1]; /* L230: */ } /* L240: */ } } } } return 0; /* End of DLARFB */ } /* dlarfb_ */ /* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal beta; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal xnorm; static doublereal safmin, rsafmn; static integer knt; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFG generates a real elementary reflector H of order n, such that H * ( alpha ) = ( beta ), H' * H = I. ( x ) ( 0 ) where alpha and beta are scalars, and x is an (n-1)-element real vector. 
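A worked instance (illustrative, in the representation given next): for n = 2, alpha = 3 and x = ( 4 ), the code below computes beta = -sign(alpha)*sqrt(alpha**2 + x(1)**2) = -5, tau = (beta - alpha)/beta = 1.6 and v = ( 0.5 ), and indeed H * ( 3, 4 )' = ( -5, 0 )'.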
H is represented in the form H = I - tau * ( 1 ) * ( 1 v' ) , ( v ) where tau is a real scalar and v is a real (n-1)-element vector. If the elements of x are all zero, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= tau <= 2. Arguments ========= N (input) INTEGER The order of the elementary reflector. ALPHA (input/output) DOUBLE PRECISION On entry, the value alpha. On exit, it is overwritten with the value beta. X (input/output) DOUBLE PRECISION array, dimension (1+(N-2)*abs(INCX)) On entry, the vector x. On exit, it is overwritten with the vector v. INCX (input) INTEGER The increment between elements of X. INCX > 0. TAU (output) DOUBLE PRECISION The value tau. ===================================================================== */ /* Parameter adjustments */ --x; /* Function Body */ if (*n <= 1) { *tau = 0.; return 0; } i__1 = *n - 1; xnorm = dnrm2_(&i__1, &x[1], incx); if (xnorm == 0.) { /* H = I */ *tau = 0.; } else { /* general case */ d__1 = dlapy2_(alpha, &xnorm); beta = -d_sign(&d__1, alpha); safmin = SAFEMINIMUM / EPSILON; if (abs(beta) < safmin) { /* XNORM, BETA may be inaccurate; scale X and recompute them */ rsafmn = 1. / safmin; knt = 0; L10: ++knt; i__1 = *n - 1; dscal_(&i__1, &rsafmn, &x[1], incx); beta *= rsafmn; *alpha *= rsafmn; if (abs(beta) < safmin) { goto L10; } /* New BETA is at most 1, at least SAFMIN */ i__1 = *n - 1; xnorm = dnrm2_(&i__1, &x[1], incx); d__1 = dlapy2_(alpha, &xnorm); beta = -d_sign(&d__1, alpha); *tau = (beta - *alpha) / beta; i__1 = *n - 1; d__1 = 1. / (*alpha - beta); dscal_(&i__1, &d__1, &x[1], incx); /* If ALPHA is subnormal, it may lose relative accuracy */ *alpha = beta; i__1 = knt; for (j = 1; j <= i__1; ++j) { *alpha *= safmin; /* L20: */ } } else { *tau = (beta - *alpha) / beta; i__1 = *n - 1; d__1 = 1. / (*alpha - beta); dscal_(&i__1, &d__1, &x[1], incx); *alpha = beta; } } return 0; /* End of DLARFG */ } /* dlarfg_ */ /* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer * k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t, integer *ldt) { /* System generated locals */ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dtrmv_(char *, char *, char *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal vii; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFT forms the triangular factor T of a real block reflector H of order n, which is defined as a product of k elementary reflectors. If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. If STOREV = 'C', the vector which defines the elementary reflector H(i) is stored in the i-th column of the array V, and H = I - V * T * V' If STOREV = 'R', the vector which defines the elementary reflector H(i) is stored in the i-th row of the array V, and H = I - V' * T * V Arguments ========= DIRECT (input) CHARACTER*1 Specifies the order in which the elementary reflectors are multiplied to form the block reflector: = 'F': H = H(1) H(2) . . . H(k) (Forward) = 'B': H = H(k) . . . 
H(2) H(1) (Backward) STOREV (input) CHARACTER*1 Specifies how the vectors which define the elementary reflectors are stored (see also Further Details): = 'C': columnwise = 'R': rowwise N (input) INTEGER The order of the block reflector H. N >= 0. K (input) INTEGER The order of the triangular factor T (= the number of elementary reflectors). K >= 1. V (input/output) DOUBLE PRECISION array, dimension (LDV,K) if STOREV = 'C' (LDV,N) if STOREV = 'R' The matrix V. See further details. LDV (input) INTEGER The leading dimension of the array V. If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i). T (output) DOUBLE PRECISION array, dimension (LDT,K) The k by k triangular factor T of the block reflector. If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is lower triangular. The rest of the array is not used. LDT (input) INTEGER The leading dimension of the array T. LDT >= K. Further Details =============== The shape of the matrix V and the storage of the vectors which define the H(i) is best illustrated by the following example with n = 5 and k = 3. The elements equal to 1 are not stored; the corresponding array elements are modified but restored on exit. The rest of the array is not used. DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) ( v1 1 ) ( 1 v2 v2 v2 ) ( v1 v2 1 ) ( 1 v3 v3 ) ( v1 v2 v3 ) ( v1 v2 v3 ) DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': V = ( v1 v2 v3 ) V = ( v1 v1 1 ) ( v1 v2 v3 ) ( v2 v2 v2 1 ) ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) ( 1 v3 ) ( 1 ) ===================================================================== Quick return if possible */ /* Parameter adjustments */ v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; --tau; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; /* Function Body */ if (*n == 0) { return 0; } if (lsame_(direct, "F")) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { if (tau[i__] == 0.) { /* H(i) = I */ i__2 = i__; for (j = 1; j <= i__2; ++j) { t[j + i__ * t_dim1] = 0.; /* L10: */ } } else { /* general case */ vii = v[i__ + i__ * v_dim1]; v[i__ + i__ * v_dim1] = 1.; if (lsame_(storev, "C")) { /* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; d__1 = -tau[i__]; dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b29, &t[ i__ * t_dim1 + 1], &c__1); } else { /* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */ i__2 = i__ - 1; i__3 = *n - i__ + 1; d__1 = -tau[i__]; dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ * v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, & c_b29, &t[i__ * t_dim1 + 1], &c__1); } v[i__ + i__ * v_dim1] = vii; /* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */ i__2 = i__ - 1; dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1); t[i__ + i__ * t_dim1] = tau[i__]; } /* L20: */ } } else { for (i__ = *k; i__ >= 1; --i__) { if (tau[i__] == 0.) 
{ /* H(i) = I */ i__1 = *k; for (j = i__; j <= i__1; ++j) { t[j + i__ * t_dim1] = 0.; /* L30: */ } } else { /* general case */ if (i__ < *k) { if (lsame_(storev, "C")) { vii = v[*n - *k + i__ + i__ * v_dim1]; v[*n - *k + i__ + i__ * v_dim1] = 1.; /* T(i+1:k,i) := - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) */ i__1 = *n - *k + i__; i__2 = *k - i__; d__1 = -tau[i__]; dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], & c__1, &c_b29, &t[i__ + 1 + i__ * t_dim1], & c__1); v[*n - *k + i__ + i__ * v_dim1] = vii; } else { vii = v[i__ + (*n - *k + i__) * v_dim1]; v[i__ + (*n - *k + i__) * v_dim1] = 1.; /* T(i+1:k,i) := - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' */ i__1 = *k - i__; i__2 = *n - *k + i__; d__1 = -tau[i__]; dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ + 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, & c_b29, &t[i__ + 1 + i__ * t_dim1], &c__1); v[i__ + (*n - *k + i__) * v_dim1] = vii; } /* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */ i__1 = *k - i__; dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1], &c__1) ; } t[i__ + i__ * t_dim1] = tau[i__]; } /* L40: */ } } return 0; /* End of DLARFT */ } /* dlarft_ */ /* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) { /* System generated locals */ integer c_dim1, c_offset, i__1; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer j; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, v8, v9, t10, v10, sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFX applies a real elementary reflector H to a real m by n matrix C, from either the left or the right. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix This version uses inline code if H has order < 11. Arguments ========= SIDE (input) CHARACTER*1 = 'L': form H * C = 'R': form C * H M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L' or (N) if SIDE = 'R' The vector v in the representation of H. TAU (input) DOUBLE PRECISION The value tau in the representation of H. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by the matrix H * C if SIDE = 'L', or C * H if SIDE = 'R'. LDC (input) INTEGER The leading dimension of the array C. LDA >= (1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L' or (M) if SIDE = 'R' WORK is not referenced if H has order < 11. ===================================================================== */ /* Parameter adjustments */ --v; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ if (*tau == 0.) { return 0; } if (lsame_(side, "L")) { /* Form H * C, where H has order m. 
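   (Illustrative note, not part of the original LAPACK comment: for
   m = 1,...,10 the switch below jumps to hand-unrolled code; any
   larger m falls through to the generic two-call BLAS path

       w := C' * v             (DGEMV)
       C := C - tau * v * w'   (DGER)

   which applies H = I - tau * v * v' as one matrix-vector product
   followed by one rank-one update.  A minimal standalone C sketch
   of that generic path, assuming column-major storage with leading
   dimension ldc and a caller-supplied workspace w of length n:

       void apply_h_left(int m, int n, const double *v, double tau,
                         double *c, int ldc, double *w)
       {
           int i, j;
           for (j = 0; j < n; ++j) {
               w[j] = 0.;
               for (i = 0; i < m; ++i)
                   w[j] += v[i] * c[i + j * ldc];
           }
           for (j = 0; j < n; ++j)
               for (i = 0; i < m; ++i)
                   c[i + j * ldc] -= tau * v[i] * w[j];
       }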
*/ switch (*m) { case 1: goto L10; case 2: goto L30; case 3: goto L50; case 4: goto L70; case 5: goto L90; case 6: goto L110; case 7: goto L130; case 8: goto L150; case 9: goto L170; case 10: goto L190; } /* Code for general M w := C'*v */ dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &c__1, & c_b29, &work[1], &c__1); /* C := C - tau * v * w' */ d__1 = -(*tau); dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc) ; goto L410; L10: /* Special code for 1 x 1 Householder */ t1 = 1. - *tau * v[1] * v[1]; i__1 = *n; for (j = 1; j <= i__1; ++j) { c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1]; /* L20: */ } goto L410; L30: /* Special code for 2 x 2 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; /* L40: */ } goto L410; L50: /* Special code for 3 x 3 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; /* L60: */ } goto L410; L70: /* Special code for 4 x 4 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; /* L80: */ } goto L410; L90: /* Special code for 5 x 5 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; /* L100: */ } goto L410; L110: /* Special code for 6 x 6 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; /* L120: */ } goto L410; L130: /* Special code for 7 x 7 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j 
* c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; /* L140: */ } goto L410; L150: /* Special code for 8 x 8 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; /* L160: */ } goto L410; L170: /* Special code for 9 x 9 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * c_dim1 + 9]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; c__[j * c_dim1 + 9] -= sum * t9; /* L180: */ } goto L410; L190: /* Special code for 10 x 10 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; v10 = v[10]; t10 = *tau * v10; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * c_dim1 + 9] + v10 * c__[j * c_dim1 + 10]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; c__[j * c_dim1 + 9] -= sum * t9; c__[j * c_dim1 + 10] -= sum * t10; /* L200: */ } goto L410; } else { /* Form C * H, where H has order n. */ switch (*n) { case 1: goto L210; case 2: goto L230; case 3: goto L250; case 4: goto L270; case 5: goto L290; case 6: goto L310; case 7: goto L330; case 8: goto L350; case 9: goto L370; case 10: goto L390; } /* Code for general N w := C * v */ dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], & c__1, &c_b29, &work[1], &c__1); /* C := C - tau * w * v' */ d__1 = -(*tau); dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc) ; goto L410; L210: /* Special code for 1 x 1 Householder */ t1 = 1. 
- *tau * v[1] * v[1]; i__1 = *m; for (j = 1; j <= i__1; ++j) { c__[j + c_dim1] = t1 * c__[j + c_dim1]; /* L220: */ } goto L410; L230: /* Special code for 2 x 2 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; /* L240: */ } goto L410; L250: /* Special code for 3 x 3 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; /* L260: */ } goto L410; L270: /* Special code for 4 x 4 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; /* L280: */ } goto L410; L290: /* Special code for 5 x 5 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; /* L300: */ } goto L410; L310: /* Special code for 6 x 6 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; /* L320: */ } goto L410; L330: /* Special code for 7 x 7 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; /* L340: */ } goto L410; L350: /* Special code for 8 x 8 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 
1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; /* L360: */ } goto L410; L370: /* Special code for 9 x 9 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ j + c_dim1 * 9]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; c__[j + c_dim1 * 9] -= sum * t9; /* L380: */ } goto L410; L390: /* Special code for 10 x 10 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; v10 = v[10]; t10 = *tau * v10; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; c__[j + c_dim1 * 9] -= sum * t9; c__[j + c_dim1 * 10] -= sum * t10; /* L400: */ } goto L410; } L410: return 0; /* End of DLARFX */ } /* dlarfx_ */ /* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs, doublereal *sn, doublereal *r__) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal); /* Local variables */ static integer i__; static doublereal scale, f1; static integer count; static doublereal g1, safmn2, safmx2; static doublereal safmin, eps; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARTG generate a plane rotation so that [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1. [ -SN CS ] [ G ] [ 0 ] This is a slower, more accurate version of the BLAS1 routine DROTG, with the following other differences: F and G are unchanged on return. If G=0, then CS=1 and SN=0. If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any floating point operations (saves work in DBDSQR when there are zeros on the diagonal). If F exceeds G in magnitude, CS will be positive. 
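   A worked example (illustrative, not part of the original LAPACK
   comment): with F = 3 and G = 4, R = sqrt(3**2 + 4**2) = 5,
   CS = F/R = 0.6 and SN = G/R = 0.8, and indeed

       [  0.6   0.8 ] . [ 3 ]  =  [ 5 ]
       [ -0.8   0.6 ]   [ 4 ]     [ 0 ]

   The scaling loops in the code below, which repeatedly multiply
   F1 and G1 by SAFMN2 or SAFMX2, only protect the evaluation of
   F**2 + G**2 against overflow and underflow; they do not change
   CS or SN, and R is rescaled back at the end.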
Arguments ========= F (input) DOUBLE PRECISION The first component of vector to be rotated. G (input) DOUBLE PRECISION The second component of vector to be rotated. CS (output) DOUBLE PRECISION The cosine of the rotation. SN (output) DOUBLE PRECISION The sine of the rotation. R (output) DOUBLE PRECISION The nonzero component of the rotated vector. This version has a few statements commented out for thread safety (machine parameters are computed on each entry). 10 feb 03, SJH. ===================================================================== LOGICAL FIRST SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 DATA FIRST / .TRUE. / IF( FIRST ) THEN */ safmin = SAFEMINIMUM; eps = EPSILON; d__1 = BASE; i__1 = (integer) (log(safmin / eps) / log(BASE) / 2.); safmn2 = pow_di(&d__1, &i__1); safmx2 = 1. / safmn2; /* FIRST = .FALSE. END IF */ if (*g == 0.) { *cs = 1.; *sn = 0.; *r__ = *f; } else if (*f == 0.) { *cs = 0.; *sn = 1.; *r__ = *g; } else { f1 = *f; g1 = *g; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale >= safmx2) { count = 0; L10: ++count; f1 *= safmn2; g1 *= safmn2; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale >= safmx2) { goto L10; } /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; i__1 = count; for (i__ = 1; i__ <= i__1; ++i__) { *r__ *= safmx2; /* L20: */ } } else if (scale <= safmn2) { count = 0; L30: ++count; f1 *= safmx2; g1 *= safmx2; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale <= safmn2) { goto L30; } /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; i__1 = count; for (i__ = 1; i__ <= i__1; ++i__) { *r__ *= safmn2; /* L40: */ } } else { /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; } if (abs(*f) > abs(*g) && *cs < 0.) { *cs = -(*cs); *sn = -(*sn); *r__ = -(*r__); } } return 0; /* End of DLARTG */ } /* dlartg_ */ /* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__, doublereal *ssmin, doublereal *ssmax) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal fhmn, fhmx, c__, fa, ga, ha, as, at, au; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAS2 computes the singular values of the 2-by-2 matrix [ F G ] [ 0 H ]. On return, SSMIN is the smaller singular value and SSMAX is the larger singular value. Arguments ========= F (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. G (input) DOUBLE PRECISION The (1,2) element of the 2-by-2 matrix. H (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. SSMIN (output) DOUBLE PRECISION The smaller singular value. SSMAX (output) DOUBLE PRECISION The larger singular value. Further Details =============== Barring over/underflow, all output quantities are correct to within a few units in the last place (ulps), even in the absence of a guard digit in addition/subtraction. In IEEE arithmetic, the code works correctly if one matrix element is infinite. Overflow will not occur unless the largest singular value itself overflows, or is within a few ulps of overflow. 
(On machines with partial overflow, like the Cray, overflow may occur if the largest singular value is within a factor of 2 of overflow.) Underflow is harmless if underflow is gradual. Otherwise, results may correspond to a matrix modified by perturbations of size near the underflow threshold. ==================================================================== */ fa = abs(*f); ga = abs(*g); ha = abs(*h__); fhmn = min(fa,ha); fhmx = max(fa,ha); if (fhmn == 0.) { *ssmin = 0.; if (fhmx == 0.) { *ssmax = ga; } else { /* Computing 2nd power */ d__1 = min(fhmx,ga) / max(fhmx,ga); *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.); } } else { if (ga < fhmx) { as = fhmn / fhmx + 1.; at = (fhmx - fhmn) / fhmx; /* Computing 2nd power */ d__1 = ga / fhmx; au = d__1 * d__1; c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au)); *ssmin = fhmn * c__; *ssmax = fhmx / c__; } else { au = fhmx / ga; if (au == 0.) { /* Avoid possible harmful underflow if exponent range asymmetric (true SSMIN may not underflow even if AU underflows) */ *ssmin = fhmn * fhmx / ga; *ssmax = ga; } else { as = fhmn / fhmx + 1.; at = (fhmx - fhmn) / fhmx; /* Computing 2nd power */ d__1 = as * au; /* Computing 2nd power */ d__2 = at * au; c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.)); *ssmin = fhmn * c__ * au; *ssmin += *ssmin; *ssmax = ga / (c__ + c__); } } } return 0; /* End of DLAS2 */ } /* dlas2_ */ /* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku, doublereal *cfrom, doublereal *cto, integer *m, integer *n, doublereal *a, integer *lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; /* Local variables */ static logical done; static doublereal ctoc; static integer i__, j; extern logical lsame_(char *, char *); static integer itype, k1, k2, k3, k4; static doublereal cfrom1; static doublereal cfromc; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum, smlnum, mul, cto1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASCL multiplies the M by N real matrix A by the real scalar CTO/CFROM. This is done without over/underflow as long as the final result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that A may be full, upper triangular, lower triangular, upper Hessenberg, or banded. Arguments ========= TYPE (input) CHARACTER*1 TYPE indices the storage type of the input matrix. = 'G': A is a full matrix. = 'L': A is a lower triangular matrix. = 'U': A is an upper triangular matrix. = 'H': A is an upper Hessenberg matrix. = 'B': A is a symmetric band matrix with lower bandwidth KL and upper bandwidth KU and with the only the lower half stored. = 'Q': A is a symmetric band matrix with lower bandwidth KL and upper bandwidth KU and with the only the upper half stored. = 'Z': A is a band matrix with lower bandwidth KL and upper bandwidth KU. KL (input) INTEGER The lower bandwidth of A. Referenced only if TYPE = 'B', 'Q' or 'Z'. KU (input) INTEGER The upper bandwidth of A. Referenced only if TYPE = 'B', 'Q' or 'Z'. CFROM (input) DOUBLE PRECISION CTO (input) DOUBLE PRECISION The matrix A is multiplied by CTO/CFROM. A(I,J) is computed without over/underflow if the final result CTO*A(I,J)/CFROM can be represented without over/underflow. CFROM must be nonzero. M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) The matrix to be multiplied by CTO/CFROM. See TYPE for the storage type. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). INFO (output) INTEGER 0 - successful exit <0 - if INFO = -i, the i-th argument had an illegal value. ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; if (lsame_(type__, "G")) { itype = 0; } else if (lsame_(type__, "L")) { itype = 1; } else if (lsame_(type__, "U")) { itype = 2; } else if (lsame_(type__, "H")) { itype = 3; } else if (lsame_(type__, "B")) { itype = 4; } else if (lsame_(type__, "Q")) { itype = 5; } else if (lsame_(type__, "Z")) { itype = 6; } else { itype = -1; } if (itype == -1) { *info = -1; } else if (*cfrom == 0.) { *info = -4; } else if (*m < 0) { *info = -6; } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) { *info = -7; } else if (itype <= 3 && *lda < max(1,*m)) { *info = -9; } else if (itype >= 4) { /* Computing MAX */ i__1 = *m - 1; if (*kl < 0 || *kl > max(i__1,0)) { *info = -2; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = *n - 1; if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) && *kl != *ku) { *info = -3; } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < * ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) { *info = -9; } } } if (*info != 0) { i__1 = -(*info); xerbla_("DLASCL", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *m == 0) { return 0; } /* Get machine parameters */ smlnum = SAFEMINIMUM; bignum = 1. / smlnum; cfromc = *cfrom; ctoc = *cto; L10: cfrom1 = cfromc * smlnum; cto1 = ctoc / bignum; if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) 
{ mul = smlnum; done = FALSE_; cfromc = cfrom1; } else if (abs(cto1) > abs(cfromc)) { mul = bignum; done = FALSE_; ctoc = cto1; } else { mul = ctoc / cfromc; done = TRUE_; } if (itype == 0) { /* Full matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L20: */ } /* L30: */ } } else if (itype == 1) { /* Lower triangular matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L40: */ } /* L50: */ } } else if (itype == 2) { /* Upper triangular matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = min(j,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L60: */ } /* L70: */ } } else if (itype == 3) { /* Upper Hessenberg matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = j + 1; i__2 = min(i__3,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L80: */ } /* L90: */ } } else if (itype == 4) { /* Lower half of a symmetric band matrix */ k3 = *kl + 1; k4 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = k3, i__4 = k4 - j; i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L100: */ } /* L110: */ } } else if (itype == 5) { /* Upper half of a symmetric band matrix */ k1 = *ku + 2; k3 = *ku + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = k1 - j; i__3 = k3; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { a[i__ + j * a_dim1] *= mul; /* L120: */ } /* L130: */ } } else if (itype == 6) { /* Band matrix */ k1 = *kl + *ku + 2; k2 = *kl + 1; k3 = (*kl << 1) + *ku + 1; k4 = *kl + *ku + 1 + *m; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__3 = k1 - j; /* Computing MIN */ i__4 = k3, i__5 = k4 - j; i__2 = min(i__4,i__5); for (i__ = max(i__3,k2); i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L140: */ } /* L150: */ } } if (! done) { goto L10; } return 0; /* End of DLASCL */ } /* dlascl_ */ /* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer * ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer * info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static doublereal beta; static integer idxq, nlvl, i__, j, m; static doublereal alpha; static integer inode, ndiml, idxqc, ndimr, itemp, sqrei, i1; extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer ic, lf, nd, ll, nl, nr; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), xerbla_( char *, integer *); static integer im1, ncc, nlf, nrf, iwk, lvl, ndb1, nlp1, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Using a divide and conquer approach, DLASD0 computes the singular value decomposition (SVD) of a real upper bidiagonal N-by-M matrix B with diagonal D and offdiagonal E, where M = N + SQRE. 
The algorithm computes orthogonal matrices U and VT such that B = U * S * VT. The singular values S are overwritten on D. A related subroutine, DLASDA, computes only the singular values, and optionally, the singular vectors in compact form. Arguments ========= N (input) INTEGER On entry, the row dimension of the upper bidiagonal matrix. This is also the dimension of the main diagonal array D. SQRE (input) INTEGER Specifies the column dimension of the bidiagonal matrix. = 0: The bidiagonal matrix has column dimension M = N; = 1: The bidiagonal matrix has column dimension M = N+1; D (input/output) DOUBLE PRECISION array, dimension (N) On entry D contains the main diagonal of the bidiagonal matrix. On exit D, if INFO = 0, contains its singular values. E (input) DOUBLE PRECISION array, dimension (M-1) Contains the subdiagonal entries of the bidiagonal matrix. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension at least (LDQ, N) On exit, U contains the left singular vectors. LDU (input) INTEGER On entry, leading dimension of U. VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M) On exit, VT' contains the right singular vectors. LDVT (input) INTEGER On entry, leading dimension of VT. SMLSIZ (input) INTEGER On entry, maximum size of the subproblems at the bottom of the computation tree. IWORK (workspace) INTEGER work array. Dimension must be at least (8 * N) WORK (workspace) DOUBLE PRECISION work array. Dimension must be at least (3 * M**2 + 2 * M) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --iwork; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*sqre < 0 || *sqre > 1) { *info = -2; } m = *n + *sqre; if (*ldu < *n) { *info = -6; } else if (*ldvt < m) { *info = -8; } else if (*smlsiz < 3) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD0", &i__1); return 0; } /* If the input matrix is too small, call DLASDQ to find the SVD. */ if (*n <= *smlsiz) { dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info); return 0; } /* Set up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; idxq = ndimr + *n; iwk = idxq + *n; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* For the nodes on bottom level of the tree, solve their subproblems by DLASDQ. 
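   As an illustration (not part of the original LAPACK comment):
   with SMLSIZ = 25, DLASDT splits a problem of size N = 100 as

                     [1..100]
                    /        \
              [1..49]        [51..100]
              /     \         /      \
         [1..24] [26..49] [51..74] [76..100]

   so ND = 4 leaves of size at most 25 are solved directly by the
   DLASDQ calls in the loop below, and the two merge levels above
   them are then handled bottom-up by the DLASD1 calls in the
   second loop.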
*/ ndb1 = (nd + 1) / 2; ncc = 0; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nlp1 = nl + 1; nr = iwork[ndimr + i1]; nrp1 = nr + 1; nlf = ic - nl; nrf = ic + 1; sqrei = 1; dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[ nlf + nlf * u_dim1], ldu, &work[1], info); if (*info != 0) { return 0; } itemp = idxq + nlf - 2; i__2 = nl; for (j = 1; j <= i__2; ++j) { iwork[itemp + j] = j; /* L10: */ } if (i__ == nd) { sqrei = *sqre; } else { sqrei = 1; } nrp1 = nr + sqrei; dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[ nrf + nrf * u_dim1], ldu, &work[1], info); if (*info != 0) { return 0; } itemp = idxq + ic; i__2 = nr; for (j = 1; j <= i__2; ++j) { iwork[itemp + j - 1] = j; /* L20: */ } /* L30: */ } /* Now conquer each subproblem bottom-up. */ for (lvl = nlvl; lvl >= 1; --lvl) { /* Find the first node LF and last node LL on the current level LVL. */ if (lvl == 1) { lf = 1; ll = 1; } else { i__1 = lvl - 1; lf = pow_ii(&c__2, &i__1); ll = (lf << 1) - 1; } i__1 = ll; for (i__ = lf; i__ <= i__1; ++i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; if (*sqre == 0 && i__ == ll) { sqrei = *sqre; } else { sqrei = 1; } idxqc = idxq + nlf - 1; alpha = d__[ic]; beta = e[ic]; dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf * u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[ idxqc], &iwork[iwk], &work[1], info); if (*info != 0) { return 0; } /* L40: */ } /* L50: */ } return 0; /* End of DLASD0 */ } /* dlasd0_ */ /* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * iwork, doublereal *work, integer *info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxc, idxp, ldvt2, i__, k, m, n, n1, n2; extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, integer *, integer *), dlasd3_( integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer iq; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static integer iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *); static integer isigma; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal orgnrm; static integer coltyp, iu2, ldq, idx, ldu2, ivt2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B, where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0. A related subroutine DLASD7 handles the case in which the singular values (and the singular vectors in factored form) are desired. DLASD1 computes the SVD as follows: ( D1(in) 0 0 0 ) B = U(in) * ( Z1' a Z2' b ) * VT(in) ( 0 0 D2(in) 0 ) = U(out) * ( D(out) 0) * VT(out) where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros elsewhere; and the entry b is empty if SQRE = 0. The left singular vectors of the original matrix are stored in U, and the transpose of the right singular vectors are stored in VT, and the singular values are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple singular values or when there are zeros in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLASD2. The second stage consists of calculating the updated singular values. This is done by finding the square roots of the roots of the secular equation via the routine DLASD4 (as called by DLASD3). This routine also calculates the singular vectors of the current problem. The final stage consists of computing the updated singular vectors directly using the updated singular values. The singular vectors for the current problem are multiplied with the singular vectors from the overall problem. Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. D (input/output) DOUBLE PRECISION array, dimension (N = NL+NR+1). On entry D(1:NL,1:NL) contains the singular values of the upper block; and D(NL+2:N) contains the singular values of the lower block. On exit D(1:N) contains the singular values of the modified matrix. ALPHA (input/output) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input/output) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. U (input/output) DOUBLE PRECISION array, dimension(LDU,N) On entry U(1:NL, 1:NL) contains the left singular vectors of the upper block; U(NL+2:N, NL+2:N) contains the left singular vectors of the lower block. On exit U contains the left singular vectors of the bidiagonal matrix. LDU (input) INTEGER The leading dimension of the array U. LDU >= max( 1, N ). VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) where M = N + SQRE. On entry VT(1:NL+1, 1:NL+1)' contains the right singular vectors of the upper block; VT(NL+2:M, NL+2:M)' contains the right singular vectors of the lower block. On exit VT' contains the right singular vectors of the bidiagonal matrix. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= max( 1, M ). IDXQ (output) INTEGER array, dimension(N) This contains the permutation which will reintegrate the subproblem just solved back into sorted order, i.e. D( IDXQ( I = 1, N ) ) will be in ascending order. IWORK (workspace) INTEGER array, dimension( 4 * N ) WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M ) INFO (output) INTEGER = 0: successful exit. 
< 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --idxq; --iwork; --work; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre < 0 || *sqre > 1) { *info = -3; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD1", &i__1); return 0; } n = *nl + *nr + 1; m = n + *sqre; /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLASD2 and DLASD3. */ ldu2 = n; ldvt2 = m; iz = 1; isigma = iz + m; iu2 = isigma + n; ivt2 = iu2 + ldu2 * n; iq = ivt2 + ldvt2 * m; idx = 1; idxc = idx + n; coltyp = idxc + n; idxp = coltyp + n; /* Scale. Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); orgnrm = max(d__1,d__2); d__[*nl + 1] = 0.; i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { orgnrm = (d__1 = d__[i__], abs(d__1)); } /* L10: */ } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); *alpha /= orgnrm; *beta /= orgnrm; /* Deflate singular values. */ dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset], ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, & work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], & idxq[1], &iwork[coltyp], info); /* Solve Secular Equation and update singular vectors. */ ldq = k; dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[ ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info); if (*info != 0) { return 0; } /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); /* Prepare the IDXQ sorting permutation. 
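   (Illustrative note, not part of the original LAPACK comment: at
   this point D(1:K) holds the K roots of the secular equation in
   ascending order and D(K+1:N) holds the N - K deflated values,
   each run internally sorted, so DLAMRG can merge the two runs
   without moving any data: it returns the permutation IDXQ with
   D(IDXQ(1)) <= D(IDXQ(2)) <= ... <= D(IDXQ(N)).  For instance, a
   merge of the sorted runs (1,4,7) and (2,3,9) stored in D(1:6)
   would be described by IDXQ = (1,4,5,2,3,6).)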
*/ n1 = k; n2 = n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); return 0; /* End of DLASD1 */ } /* dlasd1_ */ /* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * idxq, integer *coltyp, integer *info) { /* System generated locals */ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, vt2_dim1, vt2_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxi, idxj; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer ctot[4]; static doublereal c__; static integer i__, j, m, n; static doublereal s; static integer idxjp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jprev, k2; static doublereal z1; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ct; static integer jp; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal hlftol, eps, tau, tol; static integer psm[4], nlp1, nlp2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD2 merges the two sets of singular values together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more singular values are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. DLASD2 is called from DLASD1. Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (output) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. D (input/output) DOUBLE PRECISION array, dimension(N) On entry D contains the singular values of the two submatrices to be combined. On exit D contains the trailing (N-K) updated singular values (those which were deflated) sorted into increasing order. Z (output) DOUBLE PRECISION array, dimension(N) On exit Z contains the updating row vector in the secular equation. ALPHA (input) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. U (input/output) DOUBLE PRECISION array, dimension(LDU,N) On entry U contains the left singular vectors of two submatrices in the two square blocks with corners at (1,1), (NL, NL), and (NL+2, NL+2), (N,N). On exit U contains the trailing (N-K) updated left singular vectors (those which were deflated) in its last N-K columns. 
LDU (input) INTEGER The leading dimension of the array U. LDU >= N. VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) On entry VT' contains the right singular vectors of two submatrices in the two square blocks with corners at (1,1), (NL+1, NL+1), and (NL+2, NL+2), (M,M). On exit VT' contains the trailing (N-K) updated right singular vectors (those which were deflated) in its last N-K columns. In case SQRE =1, the last row of VT spans the right null space. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= M. DSIGMA (output) DOUBLE PRECISION array, dimension (N) Contains a copy of the diagonal elements (K-1 singular values and one zero) in the secular equation. U2 (output) DOUBLE PRECISION array, dimension(LDU2,N) Contains a copy of the first K-1 left singular vectors which will be used by DLASD3 in a matrix multiply (DGEMM) to solve for the new left singular vectors. U2 is arranged into four blocks. The first block contains a column with 1 at NL+1 and zero everywhere else; the second block contains non-zero entries only at and above NL; the third contains non-zero entries only below NL+1; and the fourth is dense. LDU2 (input) INTEGER The leading dimension of the array U2. LDU2 >= N. VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N) VT2' contains a copy of the first K right singular vectors which will be used by DLASD3 in a matrix multiply (DGEMM) to solve for the new right singular vectors. VT2 is arranged into three blocks. The first block contains a row that corresponds to the special 0 diagonal element in SIGMA; the second block contains non-zeros only at and before NL +1; the third block contains non-zeros only at and after NL +2. LDVT2 (input) INTEGER The leading dimension of the array VT2. LDVT2 >= M. IDXP (workspace) INTEGER array dimension(N) This will contain the permutation used to place deflated values of D at the end of the array. On output IDXP(2:K) points to the nondeflated D-values and IDXP(K+1:N) points to the deflated singular values. IDX (workspace) INTEGER array dimension(N) This will contain the permutation used to sort the contents of D into ascending order. IDXC (output) INTEGER array dimension(N) This will contain the permutation used to arrange the columns of the deflated U matrix into three groups: the first group contains non-zero entries only at and above NL, the second contains non-zero entries only below NL+2, and the third is dense. IDXQ (input/output) INTEGER array dimension(N) This contains the permutation which separately sorts the two sub-problems in D into ascending order. Note that entries in the first hlaf of this permutation must first be moved one position backward; and entries in the second half must first have NL+1 added to their values. COLTYP (workspace/output) INTEGER array dimension(N) As workspace, this will contain a label which will indicate which of the following types a column in the U2 matrix or a row in the VT2 matrix is: 1 : non-zero in the upper half only 2 : non-zero in the lower half only 3 : dense 4 : deflated On exit, it is an array of dimension 4, with COLTYP(I) being the dimension of the I-th type columns. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --z__; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --dsigma; u2_dim1 = *ldu2; u2_offset = 1 + u2_dim1 * 1; u2 -= u2_offset; vt2_dim1 = *ldvt2; vt2_offset = 1 + vt2_dim1 * 1; vt2 -= vt2_offset; --idxp; --idx; --idxc; --idxq; --coltyp; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre != 1 && *sqre != 0) { *info = -3; } n = *nl + *nr + 1; m = n + *sqre; if (*ldu < n) { *info = -10; } else if (*ldvt < m) { *info = -12; } else if (*ldu2 < n) { *info = -15; } else if (*ldvt2 < m) { *info = -17; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD2", &i__1); return 0; } nlp1 = *nl + 1; nlp2 = *nl + 2; /* Generate the first part of the vector Z; and move the singular values in the first part of D one position backward. */ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1]; z__[1] = z1; for (i__ = *nl; i__ >= 1; --i__) { z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1]; d__[i__ + 1] = d__[i__]; idxq[i__ + 1] = idxq[i__] + 1; /* L10: */ } /* Generate the second part of the vector Z. */ i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1]; /* L20: */ } /* Initialize some reference arrays. */ i__1 = nlp1; for (i__ = 2; i__ <= i__1; ++i__) { coltyp[i__] = 1; /* L30: */ } i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { coltyp[i__] = 2; /* L40: */ } /* Sort the singular values into increasing order */ i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { idxq[i__] += nlp1; /* L50: */ } /* DSIGMA, IDXC, IDXC, and the first column of U2 are used as storage space. */ i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dsigma[i__] = d__[idxq[i__]]; u2[i__ + u2_dim1] = z__[idxq[i__]]; idxc[i__] = coltyp[idxq[i__]]; /* L60: */ } dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { idxi = idx[i__] + 1; d__[i__] = dsigma[idxi]; z__[i__] = u2[idxi + u2_dim1]; coltyp[i__] = idxc[idxi]; /* L70: */ } /* Calculate the allowable deflation tolerance */ eps = EPSILON; /* Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); tol = max(d__1,d__2); /* Computing MAX */ d__2 = (d__1 = d__[n], abs(d__1)); tol = eps * 8. * max(d__2,tol); /* There are 2 kinds of deflation -- first a value in the z-vector is small, second two (or more) singular values are very close together (their difference is small). If the value in the z-vector is small, we simply permute the array so that the corresponding singular value is moved to the end. If two values in the D-vector are close, we perform a two-sided rotation designed to make one of the corresponding z-vector entries zero, and then permute the array so that the deflated singular value is moved to the end. If there are multiple singular values then the problem deflates. Here the number of equal singular values are found. As each equal singular value is found, an elementary reflector is computed to rotate the corresponding singular subspace so that the corresponding components of Z are zero in this new basis. */ *k = 1; k2 = n + 1; i__1 = n; for (j = 2; j <= i__1; ++j) { if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; coltyp[j] = 4; if (j == n) { goto L120; } } else { jprev = j; goto L90; } /* L80: */ } L90: j = jprev; L100: ++j; if (j > n) { goto L110; } if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. 
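   (Illustrative note, not part of the original LAPACK comment: an
   entry with abs(Z(j)) <= TOL cannot perturb its singular value,
   so D(j) is already final; it is flagged as type 4 and queued at
   the back of the array via IDXP(K2).  When abs(Z(j)) > TOL, the
   else branch below instead compares D(j) with the previous
   surviving value D(JPREV): if they differ by at most TOL, a
   Givens rotation with TAU = sqrt(Z(j)**2 + Z(JPREV)**2),
   C = Z(j)/TAU and S = -Z(JPREV)/TAU combines the two z-entries,
   leaving Z(j) = TAU and Z(JPREV) = 0, so that D(JPREV) deflates
   as well.)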
*/ --k2; idxp[k2] = j; coltyp[j] = 4; } else { /* Check if singular values are close enough to allow deflation. */ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { /* Deflation is possible. */ s = z__[jprev]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); c__ /= tau; s = -s / tau; z__[j] = tau; z__[jprev] = 0.; /* Apply back the Givens rotation to the left and right singular vector matrices. */ idxjp = idxq[idx[jprev] + 1]; idxj = idxq[idx[j] + 1]; if (idxjp <= nlp1) { --idxjp; } if (idxj <= nlp1) { --idxj; } drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], & c__1, &c__, &s); drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, & c__, &s); if (coltyp[j] != coltyp[jprev]) { coltyp[j] = 3; } coltyp[jprev] = 4; --k2; idxp[k2] = jprev; jprev = j; } else { ++(*k); u2[*k + u2_dim1] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; jprev = j; } } goto L100; L110: /* Record the last singular value. */ ++(*k); u2[*k + u2_dim1] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; L120: /* Count up the total number of the various types of columns, then form a permutation which positions the four column types into four groups of uniform structure (although one or more of these groups may be empty). */ for (j = 1; j <= 4; ++j) { ctot[j - 1] = 0; /* L130: */ } i__1 = n; for (j = 2; j <= i__1; ++j) { ct = coltyp[j]; ++ctot[ct - 1]; /* L140: */ } /* PSM(*) = Position in SubMatrix (of types 1 through 4) */ psm[0] = 2; psm[1] = ctot[0] + 2; psm[2] = psm[1] + ctot[1]; psm[3] = psm[2] + ctot[2]; /* Fill out the IDXC array so that the permutation which it induces will place all type-1 columns first, all type-2 columns next, then all type-3's, and finally all type-4's, starting from the second column. This applies similarly to the rows of VT. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; ct = coltyp[jp]; idxc[psm[ct - 1]] = j; ++psm[ct - 1]; /* L150: */ } /* Sort the singular values and corresponding singular vectors into DSIGMA, U2, and VT2 respectively. The singular values/vectors which were not deflated go into the first K slots of DSIGMA, U2, and VT2 respectively, while those which were deflated go into the last N - K slots, except that the first column/row will be treated separately. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; dsigma[j] = d__[jp]; idxj = idxq[idx[idxp[idxc[j]]] + 1]; if (idxj <= nlp1) { --idxj; } dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1); dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2); /* L160: */ } /* Determine DSIGMA(1), DSIGMA(2) and Z(1) */ dsigma[1] = 0.; hlftol = tol / 2.; if (abs(dsigma[2]) <= hlftol) { dsigma[2] = hlftol; } if (m > n) { z__[1] = dlapy2_(&z1, &z__[m]); if (z__[1] <= tol) { c__ = 1.; s = 0.; z__[1] = tol; } else { c__ = z1 / z__[1]; s = z__[m] / z__[1]; } } else { if (abs(z1) <= tol) { z__[1] = tol; } else { z__[1] = z1; } } /* Move the rest of the updating row to Z. */ i__1 = *k - 1; dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1); /* Determine the first column of U2, the first row of VT2 and the last row of VT. 
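   (Illustrative note, not part of the original LAPACK comment: the
   first column of U2 is set to the NL+1 st unit vector, matching
   the appended row of the bidiagonal matrix.  When M > N the extra
   column means the last row of VT spans the right null space; the
   rotation (C, S) determined above then mixes that row with row
   NL+1 of VT so that the first row of VT2 carries the combined
   updating direction.)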
*/ dlaset_("A", &n, &c__1, &c_b29, &c_b29, &u2[u2_offset], ldu2); u2[nlp1 + u2_dim1] = 1.; if (m > n) { i__1 = nlp1; for (i__ = 1; i__ <= i__1; ++i__) { vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1]; vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1]; /* L170: */ } i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1]; vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1]; /* L180: */ } } else { dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2); } if (m > n) { dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2); } /* The deflated singular values and their corresponding vectors go into the back of D, U, and V respectively. */ if (n > *k) { i__1 = n - *k; dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = n - *k; dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1) * u_dim1 + 1], ldu); i__1 = n - *k; dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 + vt_dim1], ldvt); } /* Copy CTOT into COLTYP for referencing in DLASD3. */ for (j = 1; j <= 4; ++j) { coltyp[j] = ctot[j - 1]; /* L190: */ } return 0; /* End of DLASD2 */ } /* dlasd2_ */ /* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, integer *idxc, integer *ctot, doublereal *z__, integer *info) { /* System generated locals */ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, vt2_dim1, vt2_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j, m, n; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ctemp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer ktemp; extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer jc; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal rho; static integer nlp1, nlp2, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD3 finds all the square roots of the roots of the secular equation, as defined by the values in D and Z. It makes the appropriate calls to DLASD4 and then updates the singular vectors by matrix multiplication. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. DLASD3 is called from DLASD1. 
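   (Illustrative note, not part of the original LAPACK comment: in
   the standard divide-and-conquer formulation, the updated
   singular values SIGMA computed by the DLASD4 calls below are the
   K roots of the secular equation

       f(SIGMA) = 1 + RHO * sum_{j=1..K} Z(j)**2 /
                      ( (DSIGMA(j) - SIGMA)*(DSIGMA(j) + SIGMA) ) = 0

   where RHO is the squared norm of the updating vector and Z has
   been normalized to unit length.  The roots interlace the poles:
   exactly one lies in each interval (DSIGMA(j), DSIGMA(j+1)), and
   one lies beyond DSIGMA(K).)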
Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (input) INTEGER The size of the secular equation, 1 <= K <= N. D (output) DOUBLE PRECISION array, dimension(K) On exit the square roots of the roots of the secular equation, in ascending order. Q (workspace) DOUBLE PRECISION array, dimension at least (LDQ,K). LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= K. DSIGMA (input) DOUBLE PRECISION array, dimension(K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. U (output) DOUBLE PRECISION array, dimension (LDU, N) The last N - K columns of this matrix contain the deflated left singular vectors. LDU (input) INTEGER The leading dimension of the array U. LDU >= N. U2 (input/output) DOUBLE PRECISION array, dimension (LDU2, N) The first K columns of this matrix contain the non-deflated left singular vectors for the split problem. LDU2 (input) INTEGER The leading dimension of the array U2. LDU2 >= N. VT (output) DOUBLE PRECISION array, dimension (LDVT, M) The last M - K columns of VT' contain the deflated right singular vectors. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= N. VT2 (input/output) DOUBLE PRECISION array, dimension (LDVT2, N) The first K columns of VT2' contain the non-deflated right singular vectors for the split problem. LDVT2 (input) INTEGER The leading dimension of the array VT2. LDVT2 >= N. IDXC (input) INTEGER array, dimension ( N ) The permutation used to arrange the columns of U (and rows of VT) into three groups: the first group contains non-zero entries only at and above (or before) NL+1; the second contains non-zero entries only at and below (or after) NL+2; and the third is dense. The first column of U and the first row of VT are treated separately, however. The rows of the singular vectors found by DLASD4 must be likewise permuted before the matrix multiplies can take place. CTOT (input) INTEGER array, dimension ( 4 ) A count of the total number of the various types of columns in U (or rows in VT), as described in IDXC. The fourth column type is any column which has been deflated. Z (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating row vector. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, a singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters.
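(Editorial note on the f2c idiom in the "Parameter adjustments" block below: each two-dimensional array pointer is shifted by 1 + its leading dimension so that Fortran's 1-based, column-major element A(i,j) can be addressed directly; one-dimensional arrays are simply decremented by one, e.g. --d__. A hypothetical helper showing the mapping, for illustration only:

        double fortran_elem(const double *a_shifted, int lda, int i, int j)
        {
            return a_shifted[i + j * lda];   // A(i,j), with 1-based i and j
        }
)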
*/ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dsigma; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; u2_dim1 = *ldu2; u2_offset = 1 + u2_dim1 * 1; u2 -= u2_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; vt2_dim1 = *ldvt2; vt2_offset = 1 + vt2_dim1 * 1; vt2 -= vt2_offset; --idxc; --ctot; --z__; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre != 1 && *sqre != 0) { *info = -3; } n = *nl + *nr + 1; m = n + *sqre; nlp1 = *nl + 1; nlp2 = *nl + 2; if (*k < 1 || *k > n) { *info = -4; } else if (*ldq < *k) { *info = -7; } else if (*ldu < n) { *info = -10; } else if (*ldu2 < n) { *info = -12; } else if (*ldvt < m) { *info = -14; } else if (*ldvt2 < m) { *info = -16; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD3", &i__1); return 0; } /* Quick return if possible */ if (*k == 1) { d__[1] = abs(z__[1]); dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt); if (z__[1] > 0.) { dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1); } else { i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { u[i__ + u_dim1] = -u2[i__ + u2_dim1]; /* L10: */ } } return 0; } /* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), which on any of these machines zeros out the bottommost bit of DSIGMA(I) if it is 1; this makes the subsequent subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DSIGMA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DSIGMA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DSIGMA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; /* L20: */ } /* Keep a copy of Z. */ dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1); /* Normalize Z. */ rho = dnrm2_(k, &z__[1], &c__1); dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info); rho *= rho; /* Find the new singular values. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j], &vt[j * vt_dim1 + 1], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { return 0; } /* L30: */ } /* Compute updated Z. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1]; i__2 = i__ - 1; for (j = 1; j <= i__2; ++j) { z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]); /* L40: */ } i__2 = *k - 1; for (j = i__; j <= i__2; ++j) { z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]); /* L50: */ } d__2 = sqrt((d__1 = z__[i__], abs(d__1))); z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]); /* L60: */ } /* Compute left singular vectors of the modified diagonal matrix, and store related information for the right singular vectors. 
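(Editorial note on the loop below: for each root sigma_i, DLASD4 left the factors (DSIGMA(j) - sigma_i) in column i of U and (DSIGMA(j) + sigma_i) in column i of VT. The loop therefore overwrites column i of VT with the unnormalized right singular vector components Z(j) / ((DSIGMA(j) - sigma_i)(DSIGMA(j) + sigma_i)), and column i of U with DSIGMA(j) times those components, forcing the first entry to -1; the DNRM2 call then normalizes the left vector into Q, with IDXC restoring the column-type permutation.)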
*/ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ * vt_dim1 + 1]; u[i__ * u_dim1 + 1] = -1.; i__2 = *k; for (j = 2; j <= i__2; ++j) { vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__ * vt_dim1]; u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1]; /* L70: */ } temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1); q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp; i__2 = *k; for (j = 2; j <= i__2; ++j) { jc = idxc[j]; q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp; /* L80: */ } /* L90: */ } /* Update the left singular vector matrix. */ if (*k == 2) { dgemm_("N", "N", &n, k, k, &c_b15, &u2[u2_offset], ldu2, &q[q_offset], ldq, &c_b29, &u[u_offset], ldu); goto L100; } if (ctot[1] > 0) { dgemm_("N", "N", nl, k, &ctot[1], &c_b15, &u2[(u2_dim1 << 1) + 1], ldu2, &q[q_dim1 + 2], ldq, &c_b29, &u[u_dim1 + 1], ldu); if (ctot[3] > 0) { ktemp = ctot[1] + 2 + ctot[2]; dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1] , ldu2, &q[ktemp + q_dim1], ldq, &c_b15, &u[u_dim1 + 1], ldu); } } else if (ctot[3] > 0) { ktemp = ctot[1] + 2 + ctot[2]; dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[u_dim1 + 1], ldu); } else { dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu); } dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu); ktemp = ctot[1] + 2; ctemp = ctot[2] + ctot[3]; dgemm_("N", "N", nr, k, &ctemp, &c_b15, &u2[nlp2 + ktemp * u2_dim1], ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[nlp2 + u_dim1], ldu); /* Generate the right singular vectors. */ L100: i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1); q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp; i__2 = *k; for (j = 2; j <= i__2; ++j) { jc = idxc[j]; q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp; /* L110: */ } /* L120: */ } /* Update the right singular vector matrix. 
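(Editorial note: the products below form VT := Q * VT2, split according to the CTOT counts in the same way as the left update above, so that blocks of VT2 which are structurally zero are never touched; the K = 2 case is handled by a single DGEMM.)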
*/ if (*k == 2) { dgemm_("N", "N", k, &m, k, &c_b15, &q[q_offset], ldq, &vt2[vt2_offset] , ldvt2, &c_b29, &vt[vt_offset], ldvt); return 0; } ktemp = ctot[1] + 1; dgemm_("N", "N", k, &nlp1, &ktemp, &c_b15, &q[q_dim1 + 1], ldq, &vt2[ vt2_dim1 + 1], ldvt2, &c_b29, &vt[vt_dim1 + 1], ldvt); ktemp = ctot[1] + 2 + ctot[2]; if (ktemp <= *ldvt2) { dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b15, &q[ktemp * q_dim1 + 1], ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b15, &vt[vt_dim1 + 1], ldvt); } ktemp = ctot[1] + 1; nrp1 = *nr + *sqre; if (ktemp > 1) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { q[i__ + ktemp * q_dim1] = q[i__ + q_dim1]; /* L130: */ } i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1]; /* L140: */ } } ctemp = ctot[2] + 1 + ctot[3]; dgemm_("N", "N", k, &nrp1, &ctemp, &c_b15, &q[ktemp * q_dim1 + 1], ldq, & vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b29, &vt[nlp2 * vt_dim1 + 1], ldvt); return 0; /* End of DLASD3 */ } /* dlasd3_ */ /* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal * sigma, doublereal *work, integer *info) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal dphi, dpsi; static integer iter; static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, a, b, c__; static integer j; static doublereal w, dtiim, delsq, dtiip; static integer niter; static doublereal dtisq; static logical swtch; static doublereal dtnsq; extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *) , dlasd5_(integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal delsq2, dd[3], dtnsq1; static logical swtch3; static integer ii; static doublereal dw, zz[3]; static logical orgati; static doublereal erretm, dtipsq, rhoinv; static integer ip1; static doublereal eta, phi, eps, tau, psi; static integer iim1, iip1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the square root of the I-th updated eigenvalue of a positive symmetric rank-one modification to a positive diagonal matrix whose entries are given as the squares of the corresponding entries in the array d, and that 0 <= D(i) < D(j) for i < j and that RHO > 0. This is arranged by the calling routine, and is no loss in generality. The rank-one modified system is thus diag( D ) * diag( D ) + RHO * Z * Z_transpose. where we assume the Euclidean norm of Z is 1. The method consists of approximating the rational functions in the secular equation by simpler interpolating rational functions. Arguments ========= N (input) INTEGER The length of all arrays. I (input) INTEGER The index of the eigenvalue to be computed. 1 <= I <= N. D (input) DOUBLE PRECISION array, dimension ( N ) The original eigenvalues. It is assumed that they are in order, 0 <= D(I) < D(J) for I < J. Z (input) DOUBLE PRECISION array, dimension ( N ) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension ( N ) If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th component. If N = 1, then DELTA(1) = 1. The vector DELTA contains the information necessary to construct the (singular) eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. 
SIGMA (output) DOUBLE PRECISION The computed sigma_I, the I-th updated eigenvalue. WORK (workspace) DOUBLE PRECISION array, dimension ( N ) If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th component. If N = 1, then WORK( 1 ) = 1. INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, the updating process failed. Internal Parameters =================== Logical variable ORGATI (origin-at-i?) is used for distinguishing whether D(i) or D(i+1) is treated as the origin. ORGATI = .true. origin at i ORGATI = .false. origin at i+1 Logical variable SWTCH3 (switch-for-3-poles?) is for noting if we are working with THREE poles! MAXIT is the maximum number of iterations allowed for each eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== Since this routine is called in an inner loop, we do no argument checking. Quick return for N=1 and 2. */ /* Parameter adjustments */ --work; --delta; --z__; --d__; /* Function Body */ *info = 0; if (*n == 1) { /* Presumably, I=1 upon entry */ *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]); delta[1] = 1.; work[1] = 1.; return 0; } if (*n == 2) { dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]); return 0; } /* Compute machine epsilon */ eps = EPSILON; rhoinv = 1. / *rho; /* The case I = N */ if (*i__ == *n) { /* Initialize some basic variables */ ii = *n - 1; niter = 1; /* Calculate initial guess */ temp = *rho / 2.; /* If ||Z||_2 is not one, then TEMP should be set to RHO * ||Z||_2^2 / TWO */ temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp)); i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*n] + temp1; delta[j] = d__[j] - d__[*n] - temp1; /* L10: */ } psi = 0.; i__1 = *n - 2; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / (delta[j] * work[j]); /* L20: */ } c__ = rhoinv + psi; w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[* n] / (delta[*n] * work[*n]); if (w <= 0.) { temp1 = sqrt(d__[*n] * d__[*n] + *rho); temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[* n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * z__[*n] / *rho; /* The following TAU is to approximate SIGMA_n^2 - D( N )*D( N ) */ if (c__ <= temp) { tau = *rho; } else { delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[* n]; b = z__[*n] * z__[*n] * delsq; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } } /* It can be proved that D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO */ } else { delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; b = z__[*n] * z__[*n] * delsq; /* The following TAU is to approximate SIGMA_n^2 - D( N )*D( N ) */ if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. 
* c__)) / (c__ * 2.); } /* It can be proved that D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2 */ } /* The following ETA is to approximate SIGMA_n - D( N ) */ eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau)); *sigma = d__[*n] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - eta; work[j] = d__[j] + d__[*i__] + eta; /* L30: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (delta[j] * work[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L40: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (delta[*n] * work[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ ++niter; dtnsq1 = work[*n - 1] * delta[*n - 1]; dtnsq = work[*n] * delta[*n]; c__ = w - dtnsq1 * dpsi - dtnsq * dphi; a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi); b = dtnsq * dtnsq1 * w; if (c__ < 0.) { c__ = abs(c__); } if (c__ == 0.) { eta = *rho - *sigma * *sigma; } else if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = eta - dtnsq; if (temp > *rho) { eta = *rho + dtnsq; } tau += eta; eta /= *sigma + sqrt(eta + *sigma * *sigma); i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; work[j] += eta; /* L50: */ } *sigma += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L60: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (work[*n] * delta[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 20; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ dtnsq1 = work[*n - 1] * delta[*n - 1]; dtnsq = work[*n] * delta[*n]; c__ = w - dtnsq1 * dpsi - dtnsq * dphi; a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi); b = dtnsq1 * dtnsq * w; if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = eta - dtnsq; if (temp <= 0.) 
{ eta /= 2.; } tau += eta; eta /= *sigma + sqrt(eta + *sigma * *sigma); i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; work[j] += eta; /* L70: */ } *sigma += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L80: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (work[*n] * delta[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * ( dpsi + dphi); w = rhoinv + phi + psi; /* L90: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; goto L240; /* End for the case I = N */ } else { /* The case for I < N */ niter = 1; ip1 = *i__ + 1; /* Calculate initial guess */ delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]); delsq2 = delsq / 2.; temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2)); i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*i__] + temp; delta[j] = d__[j] - d__[*i__] - temp; /* L100: */ } psi = 0.; i__1 = *i__ - 1; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / (work[j] * delta[j]); /* L110: */ } phi = 0.; i__1 = *i__ + 2; for (j = *n; j >= i__1; --j) { phi += z__[j] * z__[j] / (work[j] * delta[j]); /* L120: */ } c__ = rhoinv + psi + phi; w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[ ip1] * z__[ip1] / (work[ip1] * delta[ip1]); if (w > 0.) { /* d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2 We choose d(i) as origin. */ orgati = TRUE_; sg2lb = 0.; sg2ub = delsq2; a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; b = z__[*i__] * z__[*i__] * delsq; if (a > 0.) { tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } else { tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } /* TAU now is an estimation of SIGMA^2 - D( I )^2. The following, however, is the corresponding estimation of SIGMA - D( I ). */ eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau)); } else { /* (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2 We choose d(i+1) as origin. */ orgati = FALSE_; sg2lb = -delsq2; sg2ub = 0.; a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; b = z__[ip1] * z__[ip1] * delsq; if (a < 0.) { tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( d__1)))); } else { tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / (c__ * 2.); } /* TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The following, however, is the corresponding estimation of SIGMA - D( IP1 ). 
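The same device appears throughout this routine: given TAU ~ SIGMA^2 - D^2, the additive shift SIGMA - D is computed as TAU / (D + SQRT(D*D + TAU)), which is algebraically equal to SQRT(D*D + TAU) - D but avoids subtracting nearly equal quantities when TAU is small. A hypothetical C helper (assuming <math.h>; not part of the translation):

        double stable_shift(double d, double tau)
        {
            // == sqrt(d*d + tau) - d, without cancellation for small tau
            return tau / (d + sqrt(d * d + tau));
        }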
*/ eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, abs(d__1)))); } if (orgati) { ii = *i__; *sigma = d__[*i__] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*i__] + eta; delta[j] = d__[j] - d__[*i__] - eta; /* L130: */ } } else { ii = *i__ + 1; *sigma = d__[ip1] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[ip1] + eta; delta[j] = d__[j] - d__[ip1] - eta; /* L140: */ } } iim1 = ii - 1; iip1 = ii + 1; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L150: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L160: */ } w = rhoinv + phi + psi; /* W is the value of the secular function with its ii-th element removed. */ swtch3 = FALSE_; if (orgati) { if (w < 0.) { swtch3 = TRUE_; } } else { if (w > 0.) { swtch3 = TRUE_; } } if (ii == 1 || ii == *n) { swtch3 = FALSE_; } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w += temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } /* Calculate the new step */ ++niter; if (! swtch3) { dtipsq = work[ip1] * delta[ip1]; dtisq = work[*i__] * delta[*i__]; if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / dtisq; c__ = w - dtipsq * dw + delsq * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / dtipsq; c__ = w - dtisq * dw - delsq * (d__1 * d__1); } a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; b = dtipsq * dtisq * w; if (c__ == 0.) { if (a == 0.) { if (orgati) { a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + dphi); } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } } else { /* Interpolation using THREE most relevant poles */ dtiim = work[iim1] * delta[iim1]; dtiip = work[iip1] * delta[iip1]; temp = rhoinv + psi + phi; if (orgati) { temp1 = z__[iim1] / dtiim; temp1 *= temp1; c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; if (dpsi < temp1) { zz[2] = dtiip * dtiip * dphi; } else { zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); } } else { temp1 = z__[iip1] / dtiip; temp1 *= temp1; c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[iip1]) * temp1; if (dphi < temp1) { zz[0] = dtiim * dtiim * dpsi; } else { zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); } zz[2] = z__[iip1] * z__[iip1]; } zz[1] = z__[ii] * z__[ii]; dd[0] = dtiim; dd[1] = delta[ii] * work[ii]; dd[2] = dtiip; dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); if (*info != 0) { goto L240; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) 
{ eta = -w / dw; } if (orgati) { temp1 = work[*i__] * delta[*i__]; temp = eta - temp1; } else { temp1 = work[ip1] * delta[ip1]; temp = eta - temp1; } if (temp > sg2ub || temp < sg2lb) { if (w < 0.) { eta = (sg2ub - tau) / 2.; } else { eta = (sg2lb - tau) / 2.; } } tau += eta; eta /= *sigma + sqrt(*sigma * *sigma + eta); prew = w; *sigma += eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] += eta; delta[j] -= eta; /* L170: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L180: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L190: */ } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } swtch = FALSE_; if (orgati) { if (-w > abs(prew) / 10.) { swtch = TRUE_; } } else { if (w > abs(prew) / 10.) { swtch = TRUE_; } } /* Main loop to update the values of the array DELTA and WORK */ iter = niter + 1; for (niter = iter; niter <= 20; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ if (! swtch3) { dtipsq = work[ip1] * delta[ip1]; dtisq = work[*i__] * delta[*i__]; if (! swtch) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / dtisq; c__ = w - dtipsq * dw + delsq * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / dtipsq; c__ = w - dtisq * dw - delsq * (d__1 * d__1); } } else { temp = z__[ii] / (work[ii] * delta[ii]); if (orgati) { dpsi += temp * temp; } else { dphi += temp * temp; } c__ = w - dtisq * dpsi - dtipsq * dphi; } a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; b = dtipsq * dtisq * w; if (c__ == 0.) { if (a == 0.) { if (! swtch) { if (orgati) { a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + dtisq * dtisq * ( dpsi + dphi); } } else { a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi; } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs(d__1)))); } } else { /* Interpolation using THREE most relevant poles */ dtiim = work[iim1] * delta[iim1]; dtiip = work[iip1] * delta[iip1]; temp = rhoinv + psi + phi; if (swtch) { c__ = temp - dtiim * dpsi - dtiip * dphi; zz[0] = dtiim * dtiim * dpsi; zz[2] = dtiip * dtiip * dphi; } else { if (orgati) { temp1 = z__[iim1] / dtiim; temp1 *= temp1; temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[ iip1]) * temp1; c__ = temp - dtiip * (dpsi + dphi) - temp2; zz[0] = z__[iim1] * z__[iim1]; if (dpsi < temp1) { zz[2] = dtiip * dtiip * dphi; } else { zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); } } else { temp1 = z__[iip1] / dtiip; temp1 *= temp1; temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[ iip1]) * temp1; c__ = temp - dtiim * (dpsi + dphi) - temp2; if (dphi < temp1) { zz[0] = dtiim * dtiim * dpsi; } else { zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); } zz[2] = z__[iip1] * z__[iip1]; } } dd[0] = dtiim; dd[1] = delta[ii] * work[ii]; dd[2] = dtiip; dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); if (*info != 0) { goto L240; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } if (orgati) { temp1 = work[*i__] * delta[*i__]; temp = eta - temp1; } else { temp1 = work[ip1] * delta[ip1]; temp = eta - temp1; } if (temp > sg2ub || temp < sg2lb) { if (w < 0.) { eta = (sg2ub - tau) / 2.; } else { eta = (sg2lb - tau) / 2.; } } tau += eta; eta /= *sigma + sqrt(*sigma * *sigma + eta); *sigma += eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] += eta; delta[j] -= eta; /* L200: */ } prew = w; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L210: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L220: */ } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w * prew > 0. && abs(w) > abs(prew) / 10.) { swtch = ! swtch; } if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } /* L230: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; } L240: return 0; /* End of DLASD4 */ } /* dlasd4_ */ /* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * work) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal b, c__, w, delsq, del, tau; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the square root of the I-th eigenvalue of a positive symmetric rank-one modification of a 2-by-2 diagonal matrix diag( D ) * diag( D ) + RHO * Z * transpose(Z) . The diagonal entries in the array D are assumed to satisfy 0 <= D(i) < D(j) for i < j . We also assume RHO > 0 and that the Euclidean norm of the vector Z is one. 
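With two poles the secular equation reduces to a quadratic in TAU, and the branches below always evaluate whichever of the two equivalent root formulas avoids cancellation. A generic sketch of that choice (hypothetical helper, assuming <math.h>, b > 0 and b*b >= 4*c; not part of the translation):

        double smaller_quadratic_root(double b, double c)
        {
            // Smaller root of x^2 - b*x + c = 0, written in the form that
            // avoids the cancellation in b - sqrt(b*b - 4*c) when c is small.
            return 2.0 * c / (b + sqrt(b * b - 4.0 * c));
        }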
Arguments ========= I (input) INTEGER The index of the eigenvalue to be computed. I = 1 or I = 2. D (input) DOUBLE PRECISION array, dimension ( 2 ) The original eigenvalues. We assume 0 <= D(1) < D(2). Z (input) DOUBLE PRECISION array, dimension ( 2 ) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension ( 2 ) Contains (D(j) - sigma_I) in its j-th component. The vector DELTA contains the information necessary to construct the eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DSIGMA (output) DOUBLE PRECISION The computed sigma_I, the I-th updated eigenvalue. WORK (workspace) DOUBLE PRECISION array, dimension ( 2 ) WORK contains (D(j) + sigma_I) in its j-th component. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== */ /* Parameter adjustments */ --work; --delta; --z__; --d__; /* Function Body */ del = d__[2] - d__[1]; delsq = del * (d__[2] + d__[1]); if (*i__ == 1) { w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * z__[1] / (d__[1] * 3. + d__[2])) / del + 1.; if (w > 0.) { b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[1] * z__[1] * delsq; /* B > ZERO, always The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 ) */ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); /* The following TAU is DSIGMA - D( 1 ) */ tau /= d__[1] + sqrt(d__[1] * d__[1] + tau); *dsigma = d__[1] + tau; delta[1] = -tau; delta[2] = del - tau; work[1] = d__[1] * 2. + tau; work[2] = d__[1] + tau + d__[2]; /* DELTA( 1 ) = -Z( 1 ) / TAU DELTA( 2 ) = Z( 2 ) / ( DEL-TAU ) */ } else { b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * delsq; /* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ if (b > 0.) { tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); } else { tau = (b - sqrt(b * b + c__ * 4.)) / 2.; } /* The following TAU is DSIGMA - D( 2 ) */ tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1))); *dsigma = d__[2] + tau; delta[1] = -(del + tau); delta[2] = -tau; work[1] = d__[1] + tau + d__[2]; work[2] = d__[2] * 2. + tau; /* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) DELTA( 2 ) = -Z( 2 ) / TAU */ } /* TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) DELTA( 1 ) = DELTA( 1 ) / TEMP DELTA( 2 ) = DELTA( 2 ) / TEMP */ } else { /* Now I=2 */ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * delsq; /* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ if (b > 0.) { tau = (b + sqrt(b * b + c__ * 4.)) / 2.; } else { tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); } /* The following TAU is DSIGMA - D( 2 ) */ tau /= d__[2] + sqrt(d__[2] * d__[2] + tau); *dsigma = d__[2] + tau; delta[1] = -(del + tau); delta[2] = -tau; work[1] = d__[1] + tau + d__[2]; work[2] = d__[2] * 2. 
+ tau; /* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) DELTA( 2 ) = -Z( 2 ) / TAU TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) DELTA( 1 ) = DELTA( 1 ) / TEMP DELTA( 2 ) = DELTA( 2 ) / TEMP */ } return 0; /* End of DLASD5 */ } /* dlasd5_ */ /* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxc, idxp, ivfw, ivlw, i__, m, n; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer n1, n2; extern /* Subroutine */ int dlasd7_(integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlasd8_( integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iw; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *); static integer isigma; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal orgnrm; static integer idx; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD6 computes the SVD of an updated upper bidiagonal matrix B obtained by merging two smaller ones by appending a row. This routine is used only for the problem which requires all singular values and optionally singular vector matrices in factored form. B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE. A related subroutine, DLASD1, handles the case in which all singular values and singular vectors of the bidiagonal matrix are desired. DLASD6 computes the SVD as follows: ( D1(in) 0 0 0 ) B = U(in) * ( Z1' a Z2' b ) * VT(in) ( 0 0 D2(in) 0 ) = U(out) * ( D(out) 0) * VT(out) where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros elsewhere; and the entry b is empty if SQRE = 0. The singular values of B can be computed using D1, D2, the first components of all the right singular vectors of the lower block, and the last components of all the right singular vectors of the upper block. These components are stored and updated in VF and VL, respectively, in DLASD6. Hence U and VT are not explicitly referenced. The singular values are stored in D. The algorithm consists of two stages: The first stage consists of deflating the size of the problem when there are multiple singular values or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLASD7. 
The second stage consists of calculating the updated singular values. This is done by finding the roots of the secular equation via the routine DLASD4 (as called by DLASD8). This routine also updates VF and VL and computes the distances between the updated singular values and the old singular values. DLASD6 is called from DLASDA. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form: = 0: Compute singular values only. = 1: Compute singular vectors in factored form as well. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ). On entry D(1:NL) contains the singular values of the upper block, and D(NL+2:N) contains the singular values of the lower block. On exit D(1:N) contains the singular values of the modified matrix. VF (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VF(1:NL+1) contains the first components of all right singular vectors of the upper block; and VF(NL+2:M) contains the first components of all right singular vectors of the lower block. On exit, VF contains the first components of all right singular vectors of the bidiagonal matrix. VL (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VL(1:NL+1) contains the last components of all right singular vectors of the upper block; and VL(NL+2:M) contains the last components of all right singular vectors of the lower block. On exit, VL contains the last components of all right singular vectors of the bidiagonal matrix. ALPHA (input/output) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input/output) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. IDXQ (output) INTEGER array, dimension ( N ) This contains the permutation which will reintegrate the subproblem just solved back into sorted order, i.e. D( IDXQ( I = 1, N ) ) will be in ascending order. PERM (output) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) to be applied to each block. Not referenced if ICOMPQ = 0. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. Not referenced if ICOMPQ = 0. GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. Not referenced if ICOMPQ = 0. LDGCOL (input) INTEGER The leading dimension of GIVCOL, must be at least N. GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value to be used in the corresponding Givens rotation. Not referenced if ICOMPQ = 0. LDGNUM (input) INTEGER The leading dimension of GIVNUM and POLES, must be at least N. POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) On exit, POLES(1,*) is an array containing the new singular values obtained from solving the secular equation, and POLES(2,*) is an array containing the poles in the secular equation. Not referenced if ICOMPQ = 0. DIFL (output) DOUBLE PRECISION array, dimension ( N ) On exit, DIFL(I) is the distance between I-th updated (undeflated) singular value and the I-th (undeflated) old singular value.
DIFR (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. On exit, DIFR(I, 1) is the distance between I-th updated (undeflated) singular value and the I+1-th (undeflated) old singular value. If ICOMPQ = 1, DIFR(1:K,2) is an array containing the normalizing factors for the right singular vector matrix. See DLASD8 for details on DIFL and DIFR. Z (output) DOUBLE PRECISION array, dimension ( M ) The first elements of this array contain the components of the deflation-adjusted updating row vector. K (output) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. C (output) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (output) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M ) IWORK (workspace) INTEGER array, dimension ( 3 * N ) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --vf; --vl; --idxq; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; poles_dim1 = *ldgnum; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; --difl; --difr; --z__; --work; --iwork; /* Function Body */ *info = 0; n = *nl + *nr + 1; m = n + *sqre; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } else if (*ldgcol < n) { *info = -14; } else if (*ldgnum < n) { *info = -16; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD6", &i__1); return 0; } /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLASD7 and DLASD8. */ isigma = 1; iw = isigma + n; ivfw = iw + m; ivlw = ivfw + m; idx = 1; idxc = idx + n; idxp = idxc + n; /* Scale. Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); orgnrm = max(d__1,d__2); d__[*nl + 1] = 0.; i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { orgnrm = (d__1 = d__[i__], abs(d__1)); } /* L10: */ } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); *alpha /= orgnrm; *beta /= orgnrm; /* Sort and Deflate singular values. */ dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], & work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], & iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s, info); /* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */ dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1], ldgnum, &work[isigma], &work[iw], info); /* Save the poles if ICOMPQ = 1. 
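(Editorial note: the saved pairs (each new singular value alongside its old DSIGMA pole), together with DIFL, DIFR and Z, are what factored-form consumers of this decomposition (e.g. DLALS0, reached through DLASDA and DLALSA) use to apply the singular vector matrices without ever forming them explicitly.)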
*/ if (*icompq == 1) { dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1); dcopy_(k, &work[isigma], &c__1, &poles[(poles_dim1 << 1) + 1], &c__1); } /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); /* Prepare the IDXQ sorting permutation. */ n1 = *k; n2 = n - *k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); return 0; /* End of DLASD6 */ } /* dlasd6_ */ /* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl, doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal * dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *c__, doublereal *s, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxi, idxj; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer i__, j, m, n, idxjp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jprev, k2; static doublereal z1; static integer jp; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static doublereal hlftol, eps, tau, tol; static integer nlp1, nlp2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD7 merges the two sets of singular values together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more singular values are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. DLASD7 is called from DLASD6. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in compact form, as follows: = 0: Compute singular values only. = 1: Compute singular vectors of upper bidiagonal matrix in compact form. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (output) INTEGER Contains the dimension of the non-deflated matrix, this is the order of the related secular equation. 1 <= K <=N. D (input/output) DOUBLE PRECISION array, dimension ( N ) On entry D contains the singular values of the two submatrices to be combined. On exit D contains the trailing (N-K) updated singular values (those which were deflated) sorted into increasing order. Z (output) DOUBLE PRECISION array, dimension ( M ) On exit Z contains the updating row vector in the secular equation. ZW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for Z. VF (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VF(1:NL+1) contains the first components of all right singular vectors of the upper block; and VF(NL+2:M) contains the first components of all right singular vectors of the lower block. 
On exit, VF contains the first components of all right singular vectors of the bidiagonal matrix. VFW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for VF. VL (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VL(1:NL+1) contains the last components of all right singular vectors of the upper block; and VL(NL+2:M) contains the last components of all right singular vectors of the lower block. On exit, VL contains the last components of all right singular vectors of the bidiagonal matrix. VLW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for VL. ALPHA (input) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. DSIGMA (output) DOUBLE PRECISION array, dimension ( N ) Contains a copy of the diagonal elements (K-1 singular values and one zero) in the secular equation. IDX (workspace) INTEGER array, dimension ( N ) This will contain the permutation used to sort the contents of D into ascending order. IDXP (workspace) INTEGER array, dimension ( N ) This will contain the permutation used to place deflated values of D at the end of the array. On output IDXP(2:K) points to the nondeflated D-values and IDXP(K+1:N) points to the deflated singular values. IDXQ (input) INTEGER array, dimension ( N ) This contains the permutation which separately sorts the two sub-problems in D into ascending order. Note that entries in the first half of this permutation must first be moved one position backward; and entries in the second half must first have NL+1 added to their values. PERM (output) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) to be applied to each singular block. Not referenced if ICOMPQ = 0. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. Not referenced if ICOMPQ = 0. GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. Not referenced if ICOMPQ = 0. LDGCOL (input) INTEGER The leading dimension of GIVCOL, must be at least N. GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value to be used in the corresponding Givens rotation. Not referenced if ICOMPQ = 0. LDGNUM (input) INTEGER The leading dimension of GIVNUM, must be at least N. C (output) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (output) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
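Before any deflation, the two sorted sets of singular values are merged with DLAMRG, which produces only an index permutation. A 0-based illustrative sketch of that merge (hypothetical helper; DLAMRG itself also supports traversing either list in reverse via negative strides):

        void merge_index(const double *a, int n1, int n2, int *index)
        {
            int i = 0, j = n1, k = 0;
            while (i < n1 && j < n1 + n2)
                index[k++] = (a[i] <= a[j]) ? i++ : j++;
            while (i < n1)
                index[k++] = i++;
            while (j < n1 + n2)
                index[k++] = j++;
        }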
*/ /* Parameter adjustments */ --d__; --z__; --zw; --vf; --vfw; --vl; --vlw; --dsigma; --idx; --idxp; --idxq; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; /* Function Body */ *info = 0; n = *nl + *nr + 1; m = n + *sqre; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } else if (*ldgcol < n) { *info = -22; } else if (*ldgnum < n) { *info = -24; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD7", &i__1); return 0; } nlp1 = *nl + 1; nlp2 = *nl + 2; if (*icompq == 1) { *givptr = 0; } /* Generate the first part of the vector Z and move the singular values in the first part of D one position backward. */ z1 = *alpha * vl[nlp1]; vl[nlp1] = 0.; tau = vf[nlp1]; for (i__ = *nl; i__ >= 1; --i__) { z__[i__ + 1] = *alpha * vl[i__]; vl[i__] = 0.; vf[i__ + 1] = vf[i__]; d__[i__ + 1] = d__[i__]; idxq[i__ + 1] = idxq[i__] + 1; /* L10: */ } vf[1] = tau; /* Generate the second part of the vector Z. */ i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { z__[i__] = *beta * vf[i__]; vf[i__] = 0.; /* L20: */ } /* Sort the singular values into increasing order */ i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { idxq[i__] += nlp1; /* L30: */ } /* DSIGMA, IDXC, IDXC, and ZW are used as storage space. */ i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dsigma[i__] = d__[idxq[i__]]; zw[i__] = z__[idxq[i__]]; vfw[i__] = vf[idxq[i__]]; vlw[i__] = vl[idxq[i__]]; /* L40: */ } dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { idxi = idx[i__] + 1; d__[i__] = dsigma[idxi]; z__[i__] = zw[idxi]; vf[i__] = vfw[idxi]; vl[i__] = vlw[idxi]; /* L50: */ } /* Calculate the allowable deflation tolerence */ eps = EPSILON; /* Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); tol = max(d__1,d__2); /* Computing MAX */ d__2 = (d__1 = d__[n], abs(d__1)); tol = eps * 64. * max(d__2,tol); /* There are 2 kinds of deflation -- first a value in the z-vector is small, second two (or more) singular values are very close together (their difference is small). If the value in the z-vector is small, we simply permute the array so that the corresponding singular value is moved to the end. If two values in the D-vector are close, we perform a two-sided rotation designed to make one of the corresponding z-vector entries zero, and then permute the array so that the deflated singular value is moved to the end. If there are multiple singular values then the problem deflates. Here the number of equal singular values are found. As each equal singular value is found, an elementary reflector is computed to rotate the corresponding singular subspace so that the corresponding components of Z are zero in this new basis. */ *k = 1; k2 = n + 1; i__1 = n; for (j = 2; j <= i__1; ++j) { if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; if (j == n) { goto L100; } } else { jprev = j; goto L70; } /* L60: */ } L70: j = jprev; L80: ++j; if (j > n) { goto L90; } if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; } else { /* Check if singular values are close enough to allow deflation. */ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { /* Deflation is possible. */ *s = z__[jprev]; *c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. 
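DLAPY2 does this by factoring out the larger magnitude before squaring, as in this sketch (hypothetical helper, assuming <math.h>; not part of the translation):

        double safe_hypot(double x, double y)
        {
            double w = fabs(x), v = fabs(y), t;
            if (w < v) { t = w; w = v; v = t; }   // ensure w >= v
            if (v == 0.0)
                return w;
            t = v / w;                            // t is in [0, 1]
            return w * sqrt(1.0 + t * t);         // squaring cannot overflow
        }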
*/ tau = dlapy2_(c__, s); z__[j] = tau; z__[jprev] = 0.; *c__ /= tau; *s = -(*s) / tau; /* Record the appropriate Givens rotation */ if (*icompq == 1) { ++(*givptr); idxjp = idxq[idx[jprev] + 1]; idxj = idxq[idx[j] + 1]; if (idxjp <= nlp1) { --idxjp; } if (idxj <= nlp1) { --idxj; } givcol[*givptr + (givcol_dim1 << 1)] = idxjp; givcol[*givptr + givcol_dim1] = idxj; givnum[*givptr + (givnum_dim1 << 1)] = *c__; givnum[*givptr + givnum_dim1] = *s; } drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s); drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s); --k2; idxp[k2] = jprev; jprev = j; } else { ++(*k); zw[*k] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; jprev = j; } } goto L80; L90: /* Record the last singular value. */ ++(*k); zw[*k] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; L100: /* Sort the singular values into DSIGMA. The singular values which were not deflated go into the first K slots of DSIGMA, except that DSIGMA(1) is treated separately. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; dsigma[j] = d__[jp]; vfw[j] = vf[jp]; vlw[j] = vl[jp]; /* L110: */ } if (*icompq == 1) { i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; perm[j] = idxq[idx[jp] + 1]; if (perm[j] <= nlp1) { --perm[j]; } /* L120: */ } } /* The deflated singular values go back into the last N - K slots of D. */ i__1 = n - *k; dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); /* Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and VL(M). */ dsigma[1] = 0.; hlftol = tol / 2.; if (abs(dsigma[2]) <= hlftol) { dsigma[2] = hlftol; } if (m > n) { z__[1] = dlapy2_(&z1, &z__[m]); if (z__[1] <= tol) { *c__ = 1.; *s = 0.; z__[1] = tol; } else { *c__ = z1 / z__[1]; *s = -z__[m] / z__[1]; } drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s); drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s); } else { if (abs(z1) <= tol) { z__[1] = tol; } else { z__[1] = z1; } } /* Restore Z, VF, and VL. 
*/ i__1 = *k - 1; dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1); i__1 = n - 1; dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1); i__1 = n - 1; dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1); return 0; /* End of DLASD7 */ } /* dlasd7_ */ /* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__, doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl, doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal * work, integer *info) { /* System generated locals */ integer difr_dim1, difr_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer iwk2i, iwk3i, i__, j; static doublereal diflj, difrj, dsigj; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static doublereal dj; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal dsigjp, rho; static integer iwk1, iwk2, iwk3; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD8 finds the square roots of the roots of the secular equation, as defined by the values in DSIGMA and Z. It makes the appropriate calls to DLASD4, and stores, for each element in D, the distance to its two nearest poles (elements in DSIGMA). It also updates the arrays VF and VL, the first and last components of all the right singular vectors of the original bidiagonal matrix. DLASD8 is called from DLASD6. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form in the calling routine: = 0: Compute singular values only. = 1: Compute singular vectors in factored form as well. K (input) INTEGER The number of terms in the rational function to be solved by DLASD4. K >= 1. D (output) DOUBLE PRECISION array, dimension ( K ) On output, D contains the updated singular values. Z (input) DOUBLE PRECISION array, dimension ( K ) The first K elements of this array contain the components of the deflation-adjusted updating row vector. VF (input/output) DOUBLE PRECISION array, dimension ( K ) On entry, VF contains information passed through DBEDE8. On exit, VF contains the first K components of the first components of all right singular vectors of the bidiagonal matrix. VL (input/output) DOUBLE PRECISION array, dimension ( K ) On entry, VL contains information passed through DBEDE8. On exit, VL contains the first K components of the last components of all right singular vectors of the bidiagonal matrix. DIFL (output) DOUBLE PRECISION array, dimension ( K ) On exit, DIFL(I) = D(I) - DSIGMA(I). DIFR (output) DOUBLE PRECISION array, dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and dimension ( K ) if ICOMPQ = 0. On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not defined and will not be referenced. 
If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
            normalizing factors for the right singular vector matrix.

    LDDIFR (input) INTEGER
            The leading dimension of DIFR, must be at least K.

    DSIGMA (input) DOUBLE PRECISION array, dimension ( K )
            The first K elements of this array contain the old roots
            of the deflated updating problem. These are the poles
            of the secular equation.

    WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K

    INFO (output) INTEGER
            = 0: successful exit.
            < 0: if INFO = -i, the i-th argument had an illegal value.
            > 0: if INFO = 1, a singular value did not converge

    Further Details
    ===============

    Based on contributions by
       Ming Gu and Huan Ren, Computer Science Division, University of
       California at Berkeley, USA

    =====================================================================


       Test the input parameters.
*/

    /* Parameter adjustments */
    --d__;
    --z__;
    --vf;
    --vl;
    --difl;
    difr_dim1 = *lddifr;
    difr_offset = 1 + difr_dim1 * 1;
    difr -= difr_offset;
    --dsigma;
    --work;

    /* Function Body */
    *info = 0;

    if (*icompq < 0 || *icompq > 1) {
	*info = -1;
    } else if (*k < 1) {
	*info = -2;
    } else if (*lddifr < *k) {
	*info = -9;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("DLASD8", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*k == 1) {
	d__[1] = abs(z__[1]);
	difl[1] = d__[1];
	if (*icompq == 1) {
	    difl[2] = 1.;
	    difr[(difr_dim1 << 1) + 1] = 1.;
	}
	return 0;
    }

/*     Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
       be computed with high relative accuracy (barring over/underflow).
       This is a problem on machines without a guard digit in
       add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
       The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
       which on any of these machines zeros out the bottommost
       bit of DSIGMA(I) if it is 1; this makes the subsequent
       subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
       occurs. On binary machines with a guard digit (almost all
       machines) it does not change DSIGMA(I) at all. On hexadecimal
       and decimal machines with a guard digit, it slightly
       changes the bottommost bits of DSIGMA(I). It does not account
       for hexadecimal or decimal machines without guard digits
       (we know of none). We use a subroutine call to compute
       2*DSIGMA(I) to prevent optimizing compilers from eliminating
       this code. */

    i__1 = *k;
    for (i__ = 1; i__ <= i__1; ++i__) {
	dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
/* L10: */
    }

/*     Book keeping. */

    iwk1 = 1;
    iwk2 = iwk1 + *k;
    iwk3 = iwk2 + *k;
    iwk2i = iwk2 - 1;
    iwk3i = iwk3 - 1;

/*     Normalize Z. */

    rho = dnrm2_(k, &z__[1], &c__1);
    dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info);
    rho *= rho;

/*     Initialize WORK(IWK3). */

    dlaset_("A", k, &c__1, &c_b15, &c_b15, &work[iwk3], k);

/*     Compute the updated singular values, the arrays DIFL, DIFR,
       and the updated Z. */

    i__1 = *k;
    for (j = 1; j <= i__1; ++j) {
	dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
		iwk2], info);

/*        If the root finder fails, the computation is terminated.
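          For reference (see the DLASD4 header for the precise
          statement), DLASD4 finds the J-th root D(J) of the secular
          equation

             f(s) = 1 + RHO * sum_{i=1}^{K} Z(i)**2
                                / ( (DSIGMA(i) - s) * (DSIGMA(i) + s) )

          and returns the differences DSIGMA(i) - D(J) in WORK(IWK1)
          and the sums DSIGMA(i) + D(J) in WORK(IWK2); keeping the two
          factors separate is what allows the products below to be
          formed without cancellation.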
*/ if (*info != 0) { return 0; } work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j]; difl[j] = -work[j]; difr[j + difr_dim1] = -work[j + 1]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ j]); /* L20: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ j]); /* L30: */ } /* L40: */ } /* Compute updated Z. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1))); z__[i__] = d_sign(&d__2, &z__[i__]); /* L50: */ } /* Update VF and VL. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { diflj = difl[j]; dj = d__[j]; dsigj = -dsigma[j]; if (j < *k) { difrj = -difr[j + difr_dim1]; dsigjp = -dsigma[j + 1]; } work[j] = -z__[j] / diflj / (dsigma[j] + dj); i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / ( dsigma[i__] + dj); /* L60: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) / (dsigma[i__] + dj); /* L70: */ } temp = dnrm2_(k, &work[1], &c__1); work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp; work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp; if (*icompq == 1) { difr[j + (difr_dim1 << 1)] = temp; } /* L80: */ } dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1); dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1); return 0; /* End of DLASD8 */ } /* dlasd8_ */ /* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal *poles, integer *givptr, integer *givcol, integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1, difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static doublereal beta; static integer idxq, nlvl, i__, j, m; static doublereal alpha; static integer inode, ndiml, ndimr, idxqi, itemp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer sqrei, i1; extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer ic, nwork1, lf, nd, nwork2, ll, nl, vf, nr, vl; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), dlaset_( char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer im1, smlszp, ncc, nlf, 
nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Using a divide and conquer approach, DLASDA computes the singular value decomposition (SVD) of a real upper bidiagonal N-by-M matrix B with diagonal D and offdiagonal E, where M = N + SQRE. The algorithm computes the singular values in the SVD B = U * S * VT. The orthogonal matrices U and VT are optionally computed in compact form. A related subroutine, DLASD0, computes the singular values and the singular vectors in explicit form. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in compact form, as follows = 0: Compute singular values only. = 1: Compute singular vectors of upper bidiagonal matrix in compact form. SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The row dimension of the upper bidiagonal matrix. This is also the dimension of the main diagonal array D. SQRE (input) INTEGER Specifies the column dimension of the bidiagonal matrix. = 0: The bidiagonal matrix has column dimension M = N; = 1: The bidiagonal matrix has column dimension M = N + 1. D (input/output) DOUBLE PRECISION array, dimension ( N ) On entry D contains the main diagonal of the bidiagonal matrix. On exit D, if INFO = 0, contains its singular values. E (input) DOUBLE PRECISION array, dimension ( M-1 ) Contains the subdiagonal entries of the bidiagonal matrix. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left singular vector matrices of all subproblems at the bottom level. LDU (input) INTEGER, LDU = > N. The leading dimension of arrays U, VT, DIFL, DIFR, POLES, GIVNUM, and Z. VT (output) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right singular vector matrices of all subproblems at the bottom level. K (output) INTEGER array, dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th secular equation on the computation tree. DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ), where NLVL = floor(log_2 (N/SMLSIZ))). DIFR (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1) record distances between singular values on the I-th level and singular values on the (I -1)-th level, and DIFR(1:N, 2 * I ) contains the normalizing factors for the right singular vector matrix. See DLASD8 for details. Z (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. The first K elements of Z(1, I) contain the components of the deflation-adjusted updating row vector for subproblems on the I-th level. POLES (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and POLES(1, 2*I) contain the new and old singular values involved in the secular equations on the I-th level. GIVPTR (output) INTEGER array, dimension ( N ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. 
If ICOMPQ = 1, on exit, GIVPTR( I ) records the number of
            Givens rotations performed on the I-th problem on the
            computation tree.

    GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 * NLVL ) if
            ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1,
            on exit, for each I, GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I)
            record the locations of Givens rotations performed on the
            I-th level on the computation tree.

    LDGCOL (input) INTEGER, LDGCOL = > N.
            The leading dimension of arrays GIVCOL and PERM.

    PERM (output) INTEGER array, dimension ( LDGCOL, NLVL ) if ICOMPQ = 1,
            and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit,
            PERM(1, I) records permutations done on the I-th level of
            the computation tree.

    GIVNUM (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL )
            if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If
            ICOMPQ = 1, on exit, for each I, GIVNUM(1, 2 *I - 1) and
            GIVNUM(1, 2 *I) record the C- and S- values of Givens
            rotations performed on the I-th level on the computation
            tree.

    C (output) DOUBLE PRECISION array, dimension ( N ) if ICOMPQ = 1,
            and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 and the I-th
            subproblem is not square, on exit, C( I ) contains the
            C-value of a Givens rotation related to the right null
            space of the I-th subproblem.

    S (output) DOUBLE PRECISION array, dimension ( N ) if ICOMPQ = 1,
            and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 and the I-th
            subproblem is not square, on exit, S( I ) contains the
            S-value of a Givens rotation related to the right null
            space of the I-th subproblem.

    WORK (workspace) DOUBLE PRECISION array, dimension
            (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)).

    IWORK (workspace) INTEGER array.
            Dimension must be at least (7 * N).

    INFO (output) INTEGER
            = 0: successful exit.
            < 0: if INFO = -i, the i-th argument had an illegal value.
            > 0: if INFO = 1, a singular value did not converge

    Further Details
    ===============

    Based on contributions by
       Ming Gu and Huan Ren, Computer Science Division, University of
       California at Berkeley, USA

    =====================================================================


       Test the input parameters.
*/

    /* Parameter adjustments */
    --d__;
    --e;
    givnum_dim1 = *ldu;
    givnum_offset = 1 + givnum_dim1 * 1;
    givnum -= givnum_offset;
    poles_dim1 = *ldu;
    poles_offset = 1 + poles_dim1 * 1;
    poles -= poles_offset;
    z_dim1 = *ldu;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    difr_dim1 = *ldu;
    difr_offset = 1 + difr_dim1 * 1;
    difr -= difr_offset;
    difl_dim1 = *ldu;
    difl_offset = 1 + difl_dim1 * 1;
    difl -= difl_offset;
    vt_dim1 = *ldu;
    vt_offset = 1 + vt_dim1 * 1;
    vt -= vt_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1 * 1;
    u -= u_offset;
    --k;
    --givptr;
    perm_dim1 = *ldgcol;
    perm_offset = 1 + perm_dim1 * 1;
    perm -= perm_offset;
    givcol_dim1 = *ldgcol;
    givcol_offset = 1 + givcol_dim1 * 1;
    givcol -= givcol_offset;
    --c__;
    --s;
    --work;
    --iwork;

    /* Function Body */
    *info = 0;

    if (*icompq < 0 || *icompq > 1) {
	*info = -1;
    } else if (*smlsiz < 3) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*sqre < 0 || *sqre > 1) {
	*info = -4;
    } else if (*ldu < *n + *sqre) {
	*info = -8;
    } else if (*ldgcol < *n) {
	*info = -17;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("DLASDA", &i__1);
	return 0;
    }

    m = *n + *sqre;

/*     If the input matrix is too small, call DLASDQ to find the SVD.
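       This is the base case of the divide and conquer recursion: for
       N <= SMLSIZ the problem is small enough to be solved directly by
       the QR-based routine DLASDQ; otherwise DLASDT is used below to
       split it into a binary tree of subproblems.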
*/ if (*n <= *smlsiz) { if (*icompq == 0) { dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, & work[1], info); } else { dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset] , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info); } return 0; } /* Book-keeping and set up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; idxq = ndimr + *n; iwk = idxq + *n; ncc = 0; nru = 0; smlszp = *smlsiz + 1; vf = 1; vl = vf + m; nwork1 = vl + m; nwork2 = nwork1 + smlszp * smlszp; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* for the nodes on bottom level of the tree, solve their subproblems by DLASDQ. */ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nlp1 = nl + 1; nr = iwork[ndimr + i1]; nlf = ic - nl; nrf = ic + 1; idxqi = idxq + nlf - 2; vfi = vf + nlf - 1; vli = vl + nlf - 1; sqrei = 1; if (*icompq == 0) { dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &work[nwork1], &smlszp); dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], & work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2], &nl, &work[nwork2], info); itemp = nwork1 + nl * smlszp; dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1); dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1); } else { dlaset_("A", &nl, &nl, &c_b29, &c_b15, &u[nlf + u_dim1], ldu); dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &vt[nlf + vt_dim1], ldu); dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], & vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf + u_dim1], ldu, &work[nwork1], info); dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1); dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1) ; } if (*info != 0) { return 0; } i__2 = nl; for (j = 1; j <= i__2; ++j) { iwork[idxqi + j] = j; /* L10: */ } if (i__ == nd && *sqre == 0) { sqrei = 0; } else { sqrei = 1; } idxqi += nlp1; vfi += nlp1; vli += nlp1; nrp1 = nr + sqrei; if (*icompq == 0) { dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &work[nwork1], &smlszp); dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], & work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2], &nr, &work[nwork2], info); itemp = nwork1 + (nrp1 - 1) * smlszp; dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1); dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1); } else { dlaset_("A", &nr, &nr, &c_b29, &c_b15, &u[nrf + u_dim1], ldu); dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &vt[nrf + vt_dim1], ldu); dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], & vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf + u_dim1], ldu, &work[nwork1], info); dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1); dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1) ; } if (*info != 0) { return 0; } i__2 = nr; for (j = 1; j <= i__2; ++j) { iwork[idxqi + j] = j; /* L20: */ } /* L30: */ } /* Now conquer each subproblem bottom-up. */ j = pow_ii(&c__2, &nlvl); for (lvl = nlvl; lvl >= 1; --lvl) { lvl2 = (lvl << 1) - 1; /* Find the first node LF and last node LL on the current level LVL. 
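       Nodes are numbered consecutively level by level, so on level LVL
       the first node is LF = 2**(LVL-1) and the last is LL = 2*LF - 1;
       level 1 holds only the root node.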
*/

	if (lvl == 1) {
	    lf = 1;
	    ll = 1;
	} else {
	    i__1 = lvl - 1;
	    lf = pow_ii(&c__2, &i__1);
	    ll = (lf << 1) - 1;
	}
	i__1 = ll;
	for (i__ = lf; i__ <= i__1; ++i__) {
	    im1 = i__ - 1;
	    ic = iwork[inode + im1];
	    nl = iwork[ndiml + im1];
	    nr = iwork[ndimr + im1];
	    nlf = ic - nl;
	    nrf = ic + 1;
	    if (i__ == ll) {
		sqrei = *sqre;
	    } else {
		sqrei = 1;
	    }
	    vfi = vf + nlf - 1;
	    vli = vl + nlf - 1;
	    idxqi = idxq + nlf - 1;
	    alpha = d__[ic];
	    beta = e[ic];
	    if (*icompq == 0) {
		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
			work[vli], &alpha, &beta, &iwork[idxqi], &perm[
			perm_offset], &givptr[1], &givcol[givcol_offset],
			ldgcol, &givnum[givnum_offset], ldu, &poles[
			poles_offset], &difl[difl_offset], &difr[difr_offset],
			&z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
			&iwork[iwk], info);
	    } else {
		--j;
		dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
			work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
			lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
			givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
			givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
			difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
			difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
			&s[j], &work[nwork1], &iwork[iwk], info);
	    }
	    if (*info != 0) {
		return 0;
	    }
/* L40: */
	}
/* L50: */
    }

    return 0;

/*     End of DLASDA */

} /* dlasda_ */

/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer *
	ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e,
	doublereal *vt, integer *ldvt, doublereal *u, integer *ldu,
	doublereal *c__, integer *ldc, doublereal *work, integer *info)
{
    /* System generated locals */
    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
	    i__2;

    /* Local variables */
    static integer isub;
    static doublereal smin;
    static integer sqre1, i__, j;
    static doublereal r__;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
	    integer *, doublereal *, doublereal *, doublereal *, integer *),
	    dswap_(integer *, doublereal *, integer *, doublereal *,
	    integer *);
    static integer iuplo;
    static doublereal cs, sn;
    extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
	    doublereal *, doublereal *, doublereal *), xerbla_(char *,
	    integer *), dbdsqr_(char *, integer *, integer *, integer *,
	    integer *, doublereal *, doublereal *, doublereal *, integer *,
	    doublereal *, integer *, doublereal *, integer *, doublereal *,
	    integer *);
    static logical rotate;
    static integer np1;

/*  -- LAPACK auxiliary routine (version 3.1) --
       Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
       November 2006

    Purpose
    =======

    DLASDQ computes the singular value decomposition (SVD) of a real
    (upper or lower) bidiagonal matrix with diagonal D and offdiagonal
    E, accumulating the transformations if desired. Letting B denote
    the input bidiagonal matrix, the algorithm computes orthogonal
    matrices Q and P such that B = Q * S * P' (P' denotes the transpose
    of P). The singular values S are overwritten on D.

    The input matrix U  is changed to U  * Q  if desired.
    The input matrix VT is changed to P' * VT if desired.
    The input matrix C  is changed to Q' * C  if desired.

    See "Computing Small Singular Values of Bidiagonal Matrices With
    Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
    LAPACK Working Note #3, for a detailed description of the
    algorithm.

    Arguments
    =========

    UPLO (input) CHARACTER*1
            On entry, UPLO specifies whether the input bidiagonal matrix
            is upper or lower bidiagonal, and whether it is square or
            not.
               UPLO = 'U' or 'u'   B is upper bidiagonal.
UPLO = 'L' or 'l'   B is lower bidiagonal.

    SQRE (input) INTEGER
            = 0: then the input matrix is N-by-N.
            = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and
                 (N+1)-by-N if UPLO = 'L'.

            The bidiagonal matrix has
            N = NL + NR + 1 rows and
            M = N + SQRE >= N columns.

    N (input) INTEGER
            On entry, N specifies the number of rows and columns
            in the matrix. N must be at least 0.

    NCVT (input) INTEGER
            On entry, NCVT specifies the number of columns of
            the matrix VT. NCVT must be at least 0.

    NRU (input) INTEGER
            On entry, NRU specifies the number of rows of
            the matrix U. NRU must be at least 0.

    NCC (input) INTEGER
            On entry, NCC specifies the number of columns of
            the matrix C. NCC must be at least 0.

    D (input/output) DOUBLE PRECISION array, dimension (N)
            On entry, D contains the diagonal entries of the
            bidiagonal matrix whose SVD is desired. On normal exit,
            D contains the singular values in ascending order.

    E (input/output) DOUBLE PRECISION array, dimension is
            (N-1) if SQRE = 0 and N if SQRE = 1.
            On entry, the entries of E contain the offdiagonal entries
            of the bidiagonal matrix whose SVD is desired. On normal
            exit, E will contain 0. If the algorithm does not converge,
            D and E will contain the diagonal and superdiagonal entries
            of a bidiagonal matrix orthogonally equivalent to the one
            given as input.

    VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
            On entry, contains a matrix which on exit has been
            premultiplied by P', dimension N-by-NCVT if SQRE = 0
            and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0).

    LDVT (input) INTEGER
            On entry, LDVT specifies the leading dimension of VT as
            declared in the calling (sub) program. LDVT must be at
            least 1. If NCVT is nonzero LDVT must also be at least N.

    U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
            On entry, contains a matrix which on exit has been
            postmultiplied by Q, dimension NRU-by-N if SQRE = 0
            and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0).

    LDU (input) INTEGER
            On entry, LDU specifies the leading dimension of U as
            declared in the calling (sub) program. LDU must be at
            least max( 1, NRU ).

    C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
            On entry, contains an N-by-NCC matrix which on exit
            has been premultiplied by Q', dimension N-by-NCC if
            SQRE = 0 and (N+1)-by-NCC if SQRE = 1 (not referenced
            if NCC=0).

    LDC (input) INTEGER
            On entry, LDC specifies the leading dimension of C as
            declared in the calling (sub) program. LDC must be at
            least 1. If NCC is nonzero, LDC must also be at least N.

    WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
            Workspace. Only referenced if one of NCVT, NRU, or NCC is
            nonzero, and if N is at least 2.

    INFO (output) INTEGER
            On exit, a value of 0 indicates a successful exit.
            If INFO < 0, argument number -INFO is illegal.
            If INFO > 0, the algorithm did not converge, and INFO
            specifies how many superdiagonals did not converge.

    Further Details
    ===============

    Based on contributions by
       Ming Gu and Huan Ren, Computer Science Division, University of
       California at Berkeley, USA

    =====================================================================


       Test the input parameters.
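       As a minimal usage sketch (an illustration added here, not part
       of the LAPACK sources): computing only the singular values of a
       3-by-3 upper bidiagonal matrix could look like

           doublereal d[3] = {4., 3., 1.}, e[2] = {2., 1.};
           doublereal vt[1], u[1], c[1], work[12];
           integer n = 3, sqre = 0, ncvt = 0, nru = 0, ncc = 0;
           integer ldvt = 1, ldu = 1, ldc = 1, info;

           dlasdq_("U", &sqre, &n, &ncvt, &nru, &ncc, d, e,
                   vt, &ldvt, u, &ldu, c, &ldc, work, &info);

       after which, with info == 0, d holds the singular values in
       ascending order.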
*/ /* Parameter adjustments */ --d__; --e; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; iuplo = 0; if (lsame_(uplo, "U")) { iuplo = 1; } if (lsame_(uplo, "L")) { iuplo = 2; } if (iuplo == 0) { *info = -1; } else if (*sqre < 0 || *sqre > 1) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ncvt < 0) { *info = -4; } else if (*nru < 0) { *info = -5; } else if (*ncc < 0) { *info = -6; } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) { *info = -10; } else if (*ldu < max(1,*nru)) { *info = -12; } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) { *info = -14; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASDQ", &i__1); return 0; } if (*n == 0) { return 0; } /* ROTATE is true if any singular vectors desired, false otherwise */ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; np1 = *n + 1; sqre1 = *sqre; /* If matrix non-square upper bidiagonal, rotate to be lower bidiagonal. The rotations are on the right. */ if (iuplo == 1 && sqre1 == 1) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (rotate) { work[i__] = cs; work[*n + i__] = sn; } /* L10: */ } dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); d__[*n] = r__; e[*n] = 0.; if (rotate) { work[*n] = cs; work[*n + *n] = sn; } iuplo = 2; sqre1 = 0; /* Update singular vectors if desired. */ if (*ncvt > 0) { dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[ vt_offset], ldvt); } } /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left. */ if (iuplo == 2) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (rotate) { work[i__] = cs; work[*n + i__] = sn; } /* L20: */ } /* If matrix (N+1)-by-N lower bidiagonal, one additional rotation is needed. */ if (sqre1 == 1) { dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); d__[*n] = r__; if (rotate) { work[*n] = cs; work[*n + *n] = sn; } } /* Update singular vectors if desired. */ if (*nru > 0) { if (sqre1 == 0) { dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[ u_offset], ldu); } else { dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[ u_offset], ldu); } } if (*ncc > 0) { if (sqre1 == 0) { dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[ c_offset], ldc); } else { dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[ c_offset], ldc); } } } /* Call DBDSQR to compute the SVD of the reduced real N-by-N upper bidiagonal matrix. */ dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[ u_offset], ldu, &c__[c_offset], ldc, &work[1], info); /* Sort the singular values into ascending order (insertion sort on singular values, but only one transposition per singular vector) */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Scan for smallest D(I). */ isub = i__; smin = d__[i__]; i__2 = *n; for (j = i__ + 1; j <= i__2; ++j) { if (d__[j] < smin) { isub = j; smin = d__[j]; } /* L30: */ } if (isub != i__) { /* Swap singular values and vectors. 
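             Rows I and ISUB of VT and C are exchanged (with strides
             LDVT and LDC) and columns I and ISUB of U (contiguous);
             selecting the minimum first means each pass costs at most
             one dswap per matrix rather than one per comparison.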
*/ d__[isub] = d__[i__]; d__[i__] = smin; if (*ncvt > 0) { dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1], ldvt); } if (*nru > 0) { dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1] , &c__1); } if (*ncc > 0) { dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc) ; } } /* L40: */ } return 0; /* End of DLASDQ */ } /* dlasdq_ */ /* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer * inode, integer *ndiml, integer *ndimr, integer *msub) { /* System generated locals */ integer i__1, i__2; /* Builtin functions */ double log(doublereal); /* Local variables */ static integer maxn; static doublereal temp; static integer nlvl, llst, i__, ncrnt, il, ir; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASDT creates a tree of subproblems for bidiagonal divide and conquer. Arguments ========= N (input) INTEGER On entry, the number of diagonal elements of the bidiagonal matrix. LVL (output) INTEGER On exit, the number of levels on the computation tree. ND (output) INTEGER On exit, the number of nodes on the tree. INODE (output) INTEGER array, dimension ( N ) On exit, centers of subproblems. NDIML (output) INTEGER array, dimension ( N ) On exit, row dimensions of left children. NDIMR (output) INTEGER array, dimension ( N ) On exit, row dimensions of right children. MSUB (input) INTEGER. On entry, the maximum row dimension each subproblem at the bottom of the tree can be of. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Find the number of levels on the tree. */ /* Parameter adjustments */ --ndimr; --ndiml; --inode; /* Function Body */ maxn = max(1,*n); temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.); *lvl = (integer) temp + 1; i__ = *n / 2; inode[1] = i__ + 1; ndiml[1] = i__; ndimr[1] = *n - i__ - 1; il = 0; ir = 1; llst = 1; i__1 = *lvl - 1; for (nlvl = 1; nlvl <= i__1; ++nlvl) { /* Constructing the tree at (NLVL+1)-st level. The number of nodes created on this level is LLST * 2. */ i__2 = llst - 1; for (i__ = 0; i__ <= i__2; ++i__) { il += 2; ir += 2; ncrnt = llst + i__; ndiml[il] = ndiml[ncrnt] / 2; ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1; inode[il] = inode[ncrnt] - ndimr[il] - 1; ndiml[ir] = ndimr[ncrnt] / 2; ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1; inode[ir] = inode[ncrnt] + ndiml[ir] + 1; /* L10: */ } llst <<= 1; /* L20: */ } *nd = (llst << 1) - 1; return 0; /* End of DLASDT */ } /* dlasdt_ */ /* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal * alpha, doublereal *beta, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASET initializes an m-by-n matrix A to BETA on the diagonal and ALPHA on the offdiagonals. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be set. = 'U': Upper triangular part is set; the strictly lower triangular part of A is not changed. = 'L': Lower triangular part is set; the strictly upper triangular part of A is not changed. Otherwise: All of the matrix A is set. 
M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. ALPHA (input) DOUBLE PRECISION The constant to which the offdiagonal elements are to be set. BETA (input) DOUBLE PRECISION The constant to which the diagonal elements are to be set. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On exit, the leading m-by-n submatrix of A is set as follows: if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n, if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n, otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j, and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ if (lsame_(uplo, "U")) { /* Set the strictly upper triangular or trapezoidal part of the array to ALPHA. */ i__1 = *n; for (j = 2; j <= i__1; ++j) { /* Computing MIN */ i__3 = j - 1; i__2 = min(i__3,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L10: */ } /* L20: */ } } else if (lsame_(uplo, "L")) { /* Set the strictly lower triangular or trapezoidal part of the array to ALPHA. */ i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L30: */ } /* L40: */ } } else { /* Set the leading m-by-n submatrix to ALPHA. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L50: */ } /* L60: */ } } /* Set the first min(M,N) diagonal elements to BETA. */ i__1 = min(*m,*n); for (i__ = 1; i__ <= i__1; ++i__) { a[i__ + i__ * a_dim1] = *beta; /* L70: */ } return 0; /* End of DLASET */ } /* dlaset_ */ /* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e, doublereal *work, integer *info) { /* System generated locals */ integer i__1, i__2; doublereal d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static integer i__; static doublereal scale; static integer iinfo; static doublereal sigmn; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal sigmx; extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_( char *, integer *, doublereal *, integer *); static doublereal eps; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ1 computes the singular values of a real N-by-N bidiagonal matrix with diagonal D and off-diagonal E. The singular values are computed to high relative accuracy, in the absence of denormalization, underflow and overflow. The algorithm was first presented in "Accurate singular values and differential qd algorithms" by K. V. Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230, 1994, and the present implementation is described in "An implementation of the dqds Algorithm (Positive Case)", LAPACK Working Note. 
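    In outline: the input is scaled, D and E are interleaved and
    squared to form the qd array

       Z = ( D(1)**2, E(1)**2, D(2)**2, E(2)**2, ..., D(N)**2 ),

    DLASQ2 then computes the eigenvalues of the associated tridiagonal
    matrix, and their square roots are unscaled to give the singular
    values.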
Arguments ========= N (input) INTEGER The number of rows and columns in the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, D contains the diagonal elements of the bidiagonal matrix whose SVD is desired. On normal exit, D contains the singular values in decreasing order. E (input/output) DOUBLE PRECISION array, dimension (N) On entry, elements E(1:N-1) contain the off-diagonal elements of the bidiagonal matrix whose SVD is desired. On exit, E is overwritten. WORK (workspace) DOUBLE PRECISION array, dimension (4*N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm failed = 1, a split was marked by a positive value in E = 2, current block of Z not diagonalized after 30*N iterations (in inner while loop) = 3, termination criterion of outer while loop not met (program created more than N unreduced blocks) ===================================================================== */ /* Parameter adjustments */ --work; --e; --d__; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; i__1 = -(*info); xerbla_("DLASQ1", &i__1); return 0; } else if (*n == 0) { return 0; } else if (*n == 1) { d__[1] = abs(d__[1]); return 0; } else if (*n == 2) { dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx); d__[1] = sigmx; d__[2] = sigmn; return 0; } /* Estimate the largest singular value. */ sigmx = 0.; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = (d__1 = d__[i__], abs(d__1)); /* Computing MAX */ d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1)); sigmx = max(d__2,d__3); /* L10: */ } d__[*n] = (d__1 = d__[*n], abs(d__1)); /* Early return if SIGMX is zero (matrix is already diagonal). */ if (sigmx == 0.) { dlasrt_("D", n, &d__[1], &iinfo); return 0; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = sigmx, d__2 = d__[i__]; sigmx = max(d__1,d__2); /* L20: */ } /* Copy D and E into WORK (in the Z format) and scale (squaring the input data makes scaling by a power of the radix pointless). */ eps = PRECISION; safmin = SAFEMINIMUM; scale = sqrt(eps / safmin); dcopy_(n, &d__[1], &c__1, &work[1], &c__2); i__1 = *n - 1; dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2); i__1 = (*n << 1) - 1; i__2 = (*n << 1) - 1; dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2, &iinfo); /* Compute the q's and e's. 
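       (The two strided copies above interleaved D into the odd slots
       and E into the even slots of WORK, so squaring WORK(1:2N-1)
       yields q(i) = D(i)**2 and e(i) = E(i)**2.)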
*/

    i__1 = (*n << 1) - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing 2nd power */
	d__1 = work[i__];
	work[i__] = d__1 * d__1;
/* L30: */
    }
    work[*n * 2] = 0.;

    dlasq2_(n, &work[1], info);

    if (*info == 0) {
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    d__[i__] = sqrt(work[i__]);
/* L40: */
	}
	dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
		iinfo);
    }

    return 0;

/*     End of DLASQ1 */

} /* dlasq1_ */

/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info)
{
    /* System generated locals */
    integer i__1, i__2, i__3;
    doublereal d__1, d__2;

    /* Builtin functions */
    double sqrt(doublereal);

    /* Local variables */
    static logical ieee;
    static integer nbig;
    static doublereal dmin__, emin, emax;
    static integer ndiv, iter;
    static doublereal qmin, temp, qmax, zmax;
    static integer splt;
    static doublereal dmin1, dmin2, d__, e;
    static integer k;
    static doublereal s, t;
    static integer nfail;
    static doublereal desig, trace, sigma;
    static integer iinfo, i0, i4, n0, ttype;
    extern /* Subroutine */ int dlazq3_(integer *, integer *, doublereal *,
	    integer *, doublereal *, doublereal *, doublereal *, doublereal
	    *, integer *, integer *, integer *, logical *, integer *,
	    doublereal *, doublereal *, doublereal *, doublereal *,
	    doublereal *, doublereal *);
    static doublereal dn;
    static integer pp, iwhila, iwhilb;
    static doublereal oldemn, safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
	    integer *);
    static doublereal dn1, dn2, eps, tau, tol;
    static integer ipn4;
    static doublereal tol2;

/*  -- LAPACK routine (version 3.1) --
       Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
       November 2006

       Modified to call DLAZQ3 in place of DLASQ3, 13 Feb 03, SJH.

    Purpose
    =======

    DLASQ2 computes all the eigenvalues of the symmetric positive
    definite tridiagonal matrix associated with the qd array Z, to high
    relative accuracy, in the absence of denormalization, underflow and
    overflow.

    To see the relation of Z to the tridiagonal matrix, let L be a unit
    lower bidiagonal matrix with subdiagonals Z(2,4,6,...) and let U be
    an upper bidiagonal matrix with 1's above and diagonal Z(1,3,5,...).
    The tridiagonal is L*U or, if you prefer, the symmetric tridiagonal
    to which it is similar.

    Note : DLASQ2 defines a logical variable, IEEE, which is true on
    machines which follow ieee-754 floating-point standard in their
    handling of infinities and NaNs, and false otherwise. This variable
    is passed to DLAZQ3.

    Arguments
    =========

    N (input) INTEGER
           The number of rows and columns in the matrix. N >= 0.

    Z (workspace) DOUBLE PRECISION array, dimension ( 4*N )
           On entry Z holds the qd array. On exit, entries 1 to N hold
           the eigenvalues in decreasing order, Z( 2*N+1 ) holds the
           trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If
           N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 )
           holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of
           shifts that failed.
INFO (output) INTEGER = 0: successful exit < 0: if the i-th argument is a scalar and had an illegal value, then INFO = -i, if the i-th argument is an array and the j-entry had an illegal value, then INFO = -(i*100+j) > 0: the algorithm failed = 1, a split was marked by a positive value in E = 2, current block of Z not diagonalized after 30*N iterations (in inner while loop) = 3, termination criterion of outer while loop not met (program created more than N unreduced blocks) Further Details =============== Local Variables: I0:N0 defines a current unreduced segment of Z. The shifts are accumulated in SIGMA. Iteration count is in ITER. Ping-pong is controlled by PP (alternates between 0 and 1). ===================================================================== Test the input arguments. (in case DLASQ2 is not called by DLASQ1) */ /* Parameter adjustments */ --z__; /* Function Body */ *info = 0; eps = PRECISION; safmin = SAFEMINIMUM; tol = eps * 100.; /* Computing 2nd power */ d__1 = tol; tol2 = d__1 * d__1; if (*n < 0) { *info = -1; xerbla_("DLASQ2", &c__1); return 0; } else if (*n == 0) { return 0; } else if (*n == 1) { /* 1-by-1 case. */ if (z__[1] < 0.) { *info = -201; xerbla_("DLASQ2", &c__2); } return 0; } else if (*n == 2) { /* 2-by-2 case. */ if (z__[2] < 0. || z__[3] < 0.) { *info = -2; xerbla_("DLASQ2", &c__2); return 0; } else if (z__[3] > z__[1]) { d__ = z__[3]; z__[3] = z__[1]; z__[1] = d__; } z__[5] = z__[1] + z__[2] + z__[3]; if (z__[2] > z__[3] * tol2) { t = (z__[1] - z__[3] + z__[2]) * .5; s = z__[3] * (z__[2] / t); if (s <= t) { s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.))); } else { s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s))); } t = z__[1] + (s + z__[2]); z__[3] *= z__[1] / t; z__[1] = t; } z__[2] = z__[3]; z__[6] = z__[2] + z__[1]; return 0; } /* Check for negative data and compute sums of q's and e's. */ z__[*n * 2] = 0.; emin = z__[2]; qmax = 0.; zmax = 0.; d__ = 0.; e = 0.; i__1 = *n - 1 << 1; for (k = 1; k <= i__1; k += 2) { if (z__[k] < 0.) { *info = -(k + 200); xerbla_("DLASQ2", &c__2); return 0; } else if (z__[k + 1] < 0.) { *info = -(k + 201); xerbla_("DLASQ2", &c__2); return 0; } d__ += z__[k]; e += z__[k + 1]; /* Computing MAX */ d__1 = qmax, d__2 = z__[k]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[k + 1]; emin = min(d__1,d__2); /* Computing MAX */ d__1 = max(qmax,zmax), d__2 = z__[k + 1]; zmax = max(d__1,d__2); /* L10: */ } if (z__[(*n << 1) - 1] < 0.) { *info = -((*n << 1) + 199); xerbla_("DLASQ2", &c__2); return 0; } d__ += z__[(*n << 1) - 1]; /* Computing MAX */ d__1 = qmax, d__2 = z__[(*n << 1) - 1]; qmax = max(d__1,d__2); zmax = max(qmax,zmax); /* Check for diagonality. */ if (e == 0.) { i__1 = *n; for (k = 2; k <= i__1; ++k) { z__[k] = z__[(k << 1) - 1]; /* L20: */ } dlasrt_("D", n, &z__[1], &iinfo); z__[(*n << 1) - 1] = d__; return 0; } trace = d__ + e; /* Check for zero data. */ if (trace == 0.) { z__[(*n << 1) - 1] = 0.; return 0; } /* Check whether the machine is IEEE conformable. */ ieee = ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen) 6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1; /* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */ for (k = *n << 1; k >= 2; k += -2) { z__[k * 2] = 0.; z__[(k << 1) - 1] = z__[k]; z__[(k << 1) - 2] = 0.; z__[(k << 1) - 3] = z__[k - 1]; /* L30: */ } i0 = 1; n0 = *n; /* Reverse the qd-array, if warranted. 
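       The flip is applied when the leading q is noticeably smaller
       than the trailing one (the factor 1.5 below); the dqds iteration
       deflates at the bottom of the segment, so it converges faster
       with the small entries there.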
*/ if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) { ipn4 = i0 + n0 << 2; i__1 = i0 + n0 - 1 << 1; for (i4 = i0 << 2; i4 <= i__1; i4 += 4) { temp = z__[i4 - 3]; z__[i4 - 3] = z__[ipn4 - i4 - 3]; z__[ipn4 - i4 - 3] = temp; temp = z__[i4 - 1]; z__[i4 - 1] = z__[ipn4 - i4 - 5]; z__[ipn4 - i4 - 5] = temp; /* L40: */ } } /* Initial split checking via dqd and Li's test. */ pp = 0; for (k = 1; k <= 2; ++k) { d__ = z__[(n0 << 2) + pp - 3]; i__1 = (i0 << 2) + pp; for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) { if (z__[i4 - 1] <= tol2 * d__) { z__[i4 - 1] = 0.; d__ = z__[i4 - 3]; } else { d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1])); } /* L50: */ } /* dqd maps Z to ZZ plus Li's test. */ emin = z__[(i0 << 2) + pp + 1]; d__ = z__[(i0 << 2) + pp - 3]; i__1 = (n0 - 1 << 2) + pp; for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) { z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1]; if (z__[i4 - 1] <= tol2 * d__) { z__[i4 - 1] = 0.; z__[i4 - (pp << 1) - 2] = d__; z__[i4 - (pp << 1)] = 0.; d__ = z__[i4 + 1]; } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] && safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) { temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2]; z__[i4 - (pp << 1)] = z__[i4 - 1] * temp; d__ *= temp; } else { z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - ( pp << 1) - 2]); d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]); } /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - (pp << 1)]; emin = min(d__1,d__2); /* L60: */ } z__[(n0 << 2) - pp - 2] = d__; /* Now find qmax. */ qmax = z__[(i0 << 2) - pp - 2]; i__1 = (n0 << 2) - pp - 2; for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) { /* Computing MAX */ d__1 = qmax, d__2 = z__[i4]; qmax = max(d__1,d__2); /* L70: */ } /* Prepare for the next iteration on K. */ pp = 1 - pp; /* L80: */ } /* Initialise variables to pass to DLAZQ3 */ ttype = 0; dmin1 = 0.; dmin2 = 0.; dn = 0.; dn1 = 0.; dn2 = 0.; tau = 0.; iter = 2; nfail = 0; ndiv = n0 - i0 << 1; i__1 = *n + 1; for (iwhila = 1; iwhila <= i__1; ++iwhila) { if (n0 < 1) { goto L150; } /* While array unfinished do E(N0) holds the value of SIGMA when submatrix in I0:N0 splits from the rest of the array, but is negated. */ desig = 0.; if (n0 == *n) { sigma = 0.; } else { sigma = -z__[(n0 << 2) - 1]; } if (sigma < 0.) { *info = 1; return 0; } /* Find last unreduced submatrix's top index I0, find QMAX and EMIN. Find Gershgorin-type bound if Q's much greater than E's. */ emax = 0.; if (n0 > i0) { emin = (d__1 = z__[(n0 << 2) - 5], abs(d__1)); } else { emin = 0.; } qmin = z__[(n0 << 2) - 3]; qmax = qmin; for (i4 = n0 << 2; i4 >= 8; i4 += -4) { if (z__[i4 - 5] <= 0.) { goto L100; } if (qmin >= emax * 4.) { /* Computing MIN */ d__1 = qmin, d__2 = z__[i4 - 3]; qmin = min(d__1,d__2); /* Computing MAX */ d__1 = emax, d__2 = z__[i4 - 5]; emax = max(d__1,d__2); } /* Computing MAX */ d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - 5]; emin = min(d__1,d__2); /* L90: */ } i4 = 4; L100: i0 = i4 / 4; /* Store EMIN for passing to DLAZQ3. */ z__[(n0 << 2) - 1] = emin; /* Put -(initial shift) into DMIN. Computing MAX */ d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax); dmin__ = -max(d__1,d__2); /* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */ pp = 0; nbig = (n0 - i0 + 1) * 30; i__2 = nbig; for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) { if (i0 > n0) { goto L130; } /* While submatrix unfinished take a good dqds step. 
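          Each DLAZQ3 call maps one interleaved copy of the qd array
          onto the other (the ping-pong controlled by PP), choosing a
          shift and updating DMIN, SIGMA and the failure and iteration
          counters.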
*/ dlazq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, & nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, & dn1, &dn2, &tau); pp = 1 - pp; /* When EMIN is very small check for splits. */ if (pp == 0 && n0 - i0 >= 3) { if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 * sigma) { splt = i0 - 1; qmax = z__[(i0 << 2) - 3]; emin = z__[(i0 << 2) - 1]; oldemn = z__[i0 * 4]; i__3 = n0 - 3 << 2; for (i4 = i0 << 2; i4 <= i__3; i4 += 4) { if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= tol2 * sigma) { z__[i4 - 1] = -sigma; splt = i4 / 4; qmax = 0.; emin = z__[i4 + 3]; oldemn = z__[i4 + 4]; } else { /* Computing MAX */ d__1 = qmax, d__2 = z__[i4 + 1]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - 1]; emin = min(d__1,d__2); /* Computing MIN */ d__1 = oldemn, d__2 = z__[i4]; oldemn = min(d__1,d__2); } /* L110: */ } z__[(n0 << 2) - 1] = emin; z__[n0 * 4] = oldemn; i0 = splt + 1; } } /* L120: */ } *info = 2; return 0; /* end IWHILB */ L130: /* L140: */ ; } *info = 3; return 0; /* end IWHILA */ L150: /* Move q's to the front. */ i__1 = *n; for (k = 2; k <= i__1; ++k) { z__[k] = z__[(k << 2) - 3]; /* L160: */ } /* Sort and compute sum of eigenvalues. */ dlasrt_("D", n, &z__[1], &iinfo); e = 0.; for (k = *n; k >= 1; --k) { e += z__[k]; /* L170: */ } /* Store trace, sum(eigenvalues) and information on performance. */ z__[(*n << 1) + 1] = trace; z__[(*n << 1) + 2] = e; z__[(*n << 1) + 3] = (doublereal) iter; /* Computing 2nd power */ i__1 = *n; z__[(*n << 1) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1); z__[(*n << 1) + 5] = nfail * 100. / (doublereal) iter; return 0; /* End of DLASQ2 */ } /* dlasq2_ */ /* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, logical *ieee) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Local variables */ static doublereal emin, temp, d__; static integer j4, j4p2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ5 computes one dqds transform in ping-pong form, one version for IEEE machines another for non IEEE machines. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. EMIN is stored in Z(4*N0) to avoid an extra argument. PP (input) INTEGER PP=0 for ping, PP=1 for pong. TAU (input) DOUBLE PRECISION This is the shift. DMIN (output) DOUBLE PRECISION Minimum value of d. DMIN1 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (output) DOUBLE PRECISION d(N0), the last value of d. DNM1 (output) DOUBLE PRECISION d(N0-1). DNM2 (output) DOUBLE PRECISION d(N0-2). IEEE (input) LOGICAL Flag for IEEE or non IEEE arithmetic. ===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ if (*n0 - *i0 - 1 <= 0) { return 0; } j4 = (*i0 << 2) + *pp - 3; emin = z__[j4 + 4]; d__ = z__[j4] - *tau; *dmin__ = d__; *dmin1 = -z__[j4]; if (*ieee) { /* Code for IEEE arithmetic. 
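          Both branches implement the shifted dqds recurrence: starting
          from d = q(I0) - TAU, each step computes

             qhat(k) = d + e(k)
             t       = q(k+1) / qhat(k)
             ehat(k) = e(k) * t
             d       = d * t - TAU

          with DMIN tracking the smallest d seen. The IEEE branch can
          omit the d < 0 early exits of the branch further below
          because IEEE arithmetic handles the resulting exceptional
          values predictably.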
*/ if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; temp = z__[j4 + 1] / z__[j4 - 2]; d__ = d__ * temp - *tau; *dmin__ = min(*dmin__,d__); z__[j4] = z__[j4 - 1] * temp; /* Computing MIN */ d__1 = z__[j4]; emin = min(d__1,emin); /* L10: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; temp = z__[j4 + 2] / z__[j4 - 3]; d__ = d__ * temp - *tau; *dmin__ = min(*dmin__,d__); z__[j4 - 1] = z__[j4] * temp; /* Computing MIN */ d__1 = z__[j4 - 1]; emin = min(d__1,emin); /* L20: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; *dmin__ = min(*dmin__,*dn); } else { /* Code for non IEEE arithmetic. */ if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; if (d__ < 0.) { return 0; } else { z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4]; emin = min(d__1,d__2); /* L30: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; if (d__ < 0.) { return 0; } else { z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau; } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4 - 1]; emin = min(d__1,d__2); /* L40: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; if (*dnm2 < 0.) { return 0; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; if (*dnm1 < 0.) { return 0; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,*dn); } z__[j4 + 2] = *dn; z__[(*n0 << 2) - *pp] = emin; return 0; /* End of DLASQ5 */ } /* dlasq5_ */ /* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Local variables */ static doublereal emin, temp, d__; static integer j4; static doublereal safmin; static integer j4p2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ6 computes one dqd (shift equal to zero) transform in ping-pong form, with protection against underflow and overflow. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. EMIN is stored in Z(4*N0) to avoid an extra argument. PP (input) INTEGER PP=0 for ping, PP=1 for pong. DMIN (output) DOUBLE PRECISION Minimum value of d. 
DMIN1 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (output) DOUBLE PRECISION d(N0), the last value of d. DNM1 (output) DOUBLE PRECISION d(N0-1). DNM2 (output) DOUBLE PRECISION d(N0-2). ===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ if (*n0 - *i0 - 1 <= 0) { return 0; } safmin = SAFEMINIMUM; j4 = (*i0 << 2) + *pp - 3; emin = z__[j4 + 4]; d__ = z__[j4]; *dmin__ = d__; if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; d__ = z__[j4 + 1]; *dmin__ = d__; emin = 0.; } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4 + 1]) { temp = z__[j4 + 1] / z__[j4 - 2]; z__[j4] = z__[j4 - 1] * temp; d__ *= temp; } else { z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]); } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4]; emin = min(d__1,d__2); /* L10: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; if (z__[j4 - 3] == 0.) { z__[j4 - 1] = 0.; d__ = z__[j4 + 2]; *dmin__ = d__; emin = 0.; } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 - 3] < z__[j4 + 2]) { temp = z__[j4 + 2] / z__[j4 - 3]; z__[j4 - 1] = z__[j4] * temp; d__ *= temp; } else { z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]); } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4 - 1]; emin = min(d__1,d__2); /* L20: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; *dnm1 = z__[j4p2 + 2]; *dmin__ = *dnm1; emin = 0.; } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) { temp = z__[j4p2 + 2] / z__[j4 - 2]; z__[j4] = z__[j4p2] * temp; *dnm1 = *dnm2 * temp; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]); } *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; *dn = z__[j4p2 + 2]; *dmin__ = *dn; emin = 0.; } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) { temp = z__[j4p2 + 2] / z__[j4 - 2]; z__[j4] = z__[j4p2] * temp; *dn = *dnm1 * temp; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]); } *dmin__ = min(*dmin__,*dn); z__[j4 + 2] = *dn; z__[(*n0 << 2) - *pp] = emin; return 0; /* End of DLASQ6 */ } /* dlasq6_ */ /* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m, integer *n, doublereal *c__, doublereal *s, doublereal *a, integer * lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static doublereal ctemp, stemp; extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASR applies a sequence of plane rotations to a real matrix A, from either the left or the right. 
When SIDE = 'L', the transformation takes the form

       A := P*A

    and when SIDE = 'R', the transformation takes the form

       A := A*P**T

    where P is an orthogonal matrix consisting of a sequence of z plane
    rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R',
    and P**T is the transpose of P.

    When DIRECT = 'F' (Forward sequence), then

       P = P(z-1) * ... * P(2) * P(1)

    and when DIRECT = 'B' (Backward sequence), then

       P = P(1) * P(2) * ... * P(z-1)

    where P(k) is a plane rotation matrix defined by the 2-by-2 rotation

       R(k) = (  c(k)  s(k) )
              ( -s(k)  c(k) ).

    When PIVOT = 'V' (Variable pivot), the rotation is performed
    for the plane (k,k+1), i.e., P(k) has the form

       P(k) = (  1                                            )
              (       ...                                     )
              (              1                                )
              (                   c(k)  s(k)                  )
              (                  -s(k)  c(k)                  )
              (                                1              )
              (                                      ...      )
              (                                            1  )

    where R(k) appears as a rank-2 modification to the identity matrix
    in rows and columns k and k+1.

    When PIVOT = 'T' (Top pivot), the rotation is performed for the
    plane (1,k+1), so P(k) has the form

       P(k) = (  c(k)                    s(k)                 )
              (         1                                     )
              (              ...                              )
              (                     1                         )
              ( -s(k)                    c(k)                 )
              (                                 1             )
              (                                      ...      )
              (                                            1  )

    where R(k) appears in rows and columns 1 and k+1.

    Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is
    performed for the plane (k,z), giving P(k) the form

       P(k) = (  1                                            )
              (       ...                                     )
              (              1                                )
              (                   c(k)                   s(k) )
              (                          1                    )
              (                               ...             )
              (                                     1         )
              (                  -s(k)                   c(k) )

    where R(k) appears in rows and columns k and z. The rotations are
    performed without ever forming P(k) explicitly.

    Arguments
    =========

    SIDE (input) CHARACTER*1
            Specifies whether the plane rotation matrix P is applied to
            A on the left or the right.
            = 'L': Left, compute A := P*A
            = 'R': Right, compute A := A*P**T

    PIVOT (input) CHARACTER*1
            Specifies the plane for which P(k) is a plane rotation
            matrix.
            = 'V': Variable pivot, the plane (k,k+1)
            = 'T': Top pivot, the plane (1,k+1)
            = 'B': Bottom pivot, the plane (k,z)

    DIRECT (input) CHARACTER*1
            Specifies whether P is a forward or backward sequence of
            plane rotations.
            = 'F': Forward, P = P(z-1)*...*P(2)*P(1)
            = 'B': Backward, P = P(1)*P(2)*...*P(z-1)

    M (input) INTEGER
            The number of rows of the matrix A. If m <= 1, an immediate
            return is effected.

    N (input) INTEGER
            The number of columns of the matrix A. If n <= 1, an
            immediate return is effected.

    C (input) DOUBLE PRECISION array, dimension
            (M-1) if SIDE = 'L'
            (N-1) if SIDE = 'R'
            The cosines c(k) of the plane rotations.

    S (input) DOUBLE PRECISION array, dimension
            (M-1) if SIDE = 'L'
            (N-1) if SIDE = 'R'
            The sines s(k) of the plane rotations. The 2-by-2 plane
            rotation part of the matrix P(k), R(k), has the form

               R(k) = (  c(k)  s(k) )
                      ( -s(k)  c(k) ).

    A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
            The M-by-N matrix A. On exit, A is overwritten by P*A if
            SIDE = 'L' or by A*P**T if SIDE = 'R'.

    LDA (input) INTEGER
            The leading dimension of the array A. LDA >= max(1,M).

    =====================================================================


       Test the input parameters
*/

    /* Parameter adjustments */
    --c__;
    --s;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;

    /* Function Body */
    info = 0;
    if (! (lsame_(side, "L") || lsame_(side, "R"))) {
	info = 1;
    } else if (! (lsame_(pivot, "V") || lsame_(pivot, "T") || lsame_(pivot,
	    "B"))) {
	info = 2;
    } else if (!
(lsame_(direct, "F") || lsame_(direct, "B"))) { info = 3; } else if (*m < 0) { info = 4; } else if (*n < 0) { info = 5; } else if (*lda < max(1,*m)) { info = 9; } if (info != 0) { xerbla_("DLASR ", &info); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } if (lsame_(side, "L")) { /* Form P * A */ if (lsame_(pivot, "V")) { if (lsame_(direct, "F")) { i__1 = *m - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + 1 + i__ * a_dim1]; a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j + i__ * a_dim1]; /* L10: */ } } /* L20: */ } } else if (lsame_(direct, "B")) { for (j = *m - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + 1 + i__ * a_dim1]; a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j + i__ * a_dim1]; /* L30: */ } } /* L40: */ } } } else if (lsame_(pivot, "T")) { if (lsame_(direct, "F")) { i__1 = *m; for (j = 2; j <= i__1; ++j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ i__ * a_dim1 + 1]; a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ i__ * a_dim1 + 1]; /* L50: */ } } /* L60: */ } } else if (lsame_(direct, "B")) { for (j = *m; j >= 2; --j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ i__ * a_dim1 + 1]; a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ i__ * a_dim1 + 1]; /* L70: */ } } /* L80: */ } } } else if (lsame_(pivot, "B")) { if (lsame_(direct, "F")) { i__1 = *m - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] + ctemp * temp; a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * a_dim1] - stemp * temp; /* L90: */ } } /* L100: */ } } else if (lsame_(direct, "B")) { for (j = *m - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] + ctemp * temp; a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * a_dim1] - stemp * temp; /* L110: */ } } /* L120: */ } } } } else if (lsame_(side, "R")) { /* Form A * P' */ if (lsame_(pivot, "V")) { if (lsame_(direct, "F")) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + (j + 1) * a_dim1]; a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ i__ + j * a_dim1]; /* L130: */ } } /* L140: */ } } else if (lsame_(direct, "B")) { for (j = *n - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) 
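/* apply the rotation only when it is not the identity (c = 1, s = 0) */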
{ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + (j + 1) * a_dim1]; a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ i__ + j * a_dim1]; /* L150: */ } } /* L160: */ } } } else if (lsame_(pivot, "T")) { if (lsame_(direct, "F")) { i__1 = *n; for (j = 2; j <= i__1; ++j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ i__ + a_dim1]; a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + a_dim1]; /* L170: */ } } /* L180: */ } } else if (lsame_(direct, "B")) { for (j = *n; j >= 2; --j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ i__ + a_dim1]; a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + a_dim1]; /* L190: */ } } /* L200: */ } } } else if (lsame_(pivot, "B")) { if (lsame_(direct, "F")) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] + ctemp * temp; a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * a_dim1] - stemp * temp; /* L210: */ } } /* L220: */ } } else if (lsame_(direct, "B")) { for (j = *n - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] + ctemp * temp; a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * a_dim1] - stemp * temp; /* L230: */ } } /* L240: */ } } } } return 0; /* End of DLASR */ } /* dlasr_ */ /* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer * info) { /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer endd, i__, j; extern logical lsame_(char *, char *); static integer stack[64] /* was [2][32] */; static doublereal dmnmx, d1, d2, d3; static integer start; extern /* Subroutine */ int xerbla_(char *, integer *); static integer stkpnt, dir; static doublereal tmp; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Sort the numbers in D in increasing order (if ID = 'I') or in decreasing order (if ID = 'D' ). Use Quick Sort, reverting to Insertion sort on arrays of size <= 20. Dimension of STACK limits N to about 2**32. Arguments ========= ID (input) CHARACTER*1 = 'I': sort D in increasing order; = 'D': sort D in decreasing order. N (input) INTEGER The length of the array D. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the array to be sorted. On exit, D has been sorted into increasing order (D(1) <= ... <= D(N) ) or into decreasing order (D(1) >= ... >= D(N) ), depending on ID. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input paramters. 
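(Illustrative notes: the code below replaces recursion with an explicit stack of index ranges; after each partition the larger piece is pushed first, so the smaller piece is sorted next and the stack never holds more than about log2(N) ranges, which is why 32 range pairs suffice for N up to about 2**32. As a hypothetical usage sketch: with integer n = 5, info; doublereal d[5] = {3., 1., 4., 1., 5.}; the call dlasrt_("I", &n, d, &info) leaves d = (1., 1., 3., 4., 5.) and info = 0.)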
*/ /* Parameter adjustments */ --d__; /* Function Body */ *info = 0; dir = -1; if (lsame_(id, "D")) { dir = 0; } else if (lsame_(id, "I")) { dir = 1; } if (dir == -1) { *info = -1; } else if (*n < 0) { *info = -2; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASRT", &i__1); return 0; } /* Quick return if possible */ if (*n <= 1) { return 0; } stkpnt = 1; stack[0] = 1; stack[1] = *n; L10: start = stack[(stkpnt << 1) - 2]; endd = stack[(stkpnt << 1) - 1]; --stkpnt; if (endd - start <= 20 && endd - start > 0) { /* Do Insertion sort on D( START:ENDD ) */ if (dir == 0) { /* Sort into decreasing order */ i__1 = endd; for (i__ = start + 1; i__ <= i__1; ++i__) { i__2 = start + 1; for (j = i__; j >= i__2; --j) { if (d__[j] > d__[j - 1]) { dmnmx = d__[j]; d__[j] = d__[j - 1]; d__[j - 1] = dmnmx; } else { goto L30; } /* L20: */ } L30: ; } } else { /* Sort into increasing order */ i__1 = endd; for (i__ = start + 1; i__ <= i__1; ++i__) { i__2 = start + 1; for (j = i__; j >= i__2; --j) { if (d__[j] < d__[j - 1]) { dmnmx = d__[j]; d__[j] = d__[j - 1]; d__[j - 1] = dmnmx; } else { goto L50; } /* L40: */ } L50: ; } } } else if (endd - start > 20) { /* Partition D( START:ENDD ) and stack parts, largest one first Choose partition entry as median of 3 */ d1 = d__[start]; d2 = d__[endd]; i__ = (start + endd) / 2; d3 = d__[i__]; if (d1 < d2) { if (d3 < d1) { dmnmx = d1; } else if (d3 < d2) { dmnmx = d3; } else { dmnmx = d2; } } else { if (d3 < d2) { dmnmx = d2; } else if (d3 < d1) { dmnmx = d3; } else { dmnmx = d1; } } if (dir == 0) { /* Sort into decreasing order */ i__ = start - 1; j = endd + 1; L60: L70: --j; if (d__[j] < dmnmx) { goto L70; } L80: ++i__; if (d__[i__] > dmnmx) { goto L80; } if (i__ < j) { tmp = d__[i__]; d__[i__] = d__[j]; d__[j] = tmp; goto L60; } if (j - start > endd - j - 1) { ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; } else { ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; } } else { /* Sort into increasing order */ i__ = start - 1; j = endd + 1; L90: L100: --j; if (d__[j] > dmnmx) { goto L100; } L110: ++i__; if (d__[i__] < dmnmx) { goto L110; } if (i__ < j) { tmp = d__[i__]; d__[i__] = d__[j]; d__[j] = tmp; goto L90; } if (j - start > endd - j - 1) { ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; } else { ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; } } } if (stkpnt > 0) { goto L10; } return 0; /* End of DLASRT */ } /* dlasrt_ */ /* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx, doublereal *scale, doublereal *sumsq) { /* System generated locals */ integer i__1, i__2; doublereal d__1; /* Local variables */ static doublereal absxi; static integer ix; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASSQ returns the values scl and smsq such that ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is assumed to be non-negative and scl returns the value scl = max( scale, abs( x( i ) ) ). 
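As an illustrative worked example (hypothetical values): starting from scale = 0 and sumsq = 1, one pass over x = ( 3, 4 ) gives scale = 3, sumsq = 1 after the first element and scale = 4, sumsq = 25/16 after the second, so that ( scale**2 )*sumsq = 25 = 3**2 + 4**2 and the 2-norm of x is scale * sqrt( sumsq ) = 5.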
scale and sumsq must be supplied in SCALE and SUMSQ and scl and smsq are overwritten on SCALE and SUMSQ respectively. The routine makes only one pass through the vector x. Arguments ========= N (input) INTEGER The number of elements to be used from the vector X. X (input) DOUBLE PRECISION array, dimension (N) The vector for which a scaled sum of squares is computed. x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. INCX (input) INTEGER The increment between successive values of the vector X. INCX > 0. SCALE (input/output) DOUBLE PRECISION On entry, the value scale in the equation above. On exit, SCALE is overwritten with scl , the scaling factor for the sum of squares. SUMSQ (input/output) DOUBLE PRECISION On entry, the value sumsq in the equation above. On exit, SUMSQ is overwritten with smsq , the basic sum of squares from which scl has been factored out. ===================================================================== */ /* Parameter adjustments */ --x; /* Function Body */ if (*n > 0) { i__1 = (*n - 1) * *incx + 1; i__2 = *incx; for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { if (x[ix] != 0.) { absxi = (d__1 = x[ix], abs(d__1)); if (*scale < absxi) { /* Computing 2nd power */ d__1 = *scale / absxi; *sumsq = *sumsq * (d__1 * d__1) + 1; *scale = absxi; } else { /* Computing 2nd power */ d__1 = absxi / *scale; *sumsq += d__1 * d__1; } } /* L10: */ } } return 0; /* End of DLASSQ */ } /* dlassq_ */ /* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__, doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal * csr, doublereal *snl, doublereal *csl) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer pmax; static doublereal temp; static logical swap; static doublereal a, d__, l, m, r__, s, t, tsign, fa, ga, ha; static doublereal ft, gt, ht, mm; static logical gasmal; static doublereal tt, clt, crt, slt, srt; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASV2 computes the singular value decomposition of a 2-by-2 triangular matrix [ F G ] [ 0 H ]. On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and right singular vectors for abs(SSMAX), giving the decomposition [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ] [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ]. Arguments ========= F (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. G (input) DOUBLE PRECISION The (1,2) element of the 2-by-2 matrix. H (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. SSMIN (output) DOUBLE PRECISION abs(SSMIN) is the smaller singular value. SSMAX (output) DOUBLE PRECISION abs(SSMAX) is the larger singular value. SNL (output) DOUBLE PRECISION CSL (output) DOUBLE PRECISION The vector (CSL, SNL) is a unit left singular vector for the singular value abs(SSMAX). SNR (output) DOUBLE PRECISION CSR (output) DOUBLE PRECISION The vector (CSR, SNR) is a unit right singular vector for the singular value abs(SSMAX). Further Details =============== Any input parameter may be aliased with any output parameter. Barring over/underflow and assuming a guard digit in subtraction, all output quantities are correct to within a few units in the last place (ulps). In IEEE arithmetic, the code works correctly if one matrix element is infinite. 
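As an illustrative check (hypothetical input): for F = 1, G = 0, H = 2 the matrix is already diagonal, and the routine returns SSMAX = 2 and SSMIN = 1 with ( CSL, SNL ) = ( CSR, SNR ) = ( 0, 1 ), i.e. both singular vectors for the larger singular value pick out the second coordinate axis.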
Overflow will not occur unless the largest singular value itself overflows or is within a few ulps of overflow. (On machines with partial overflow, like the Cray, overflow may occur if the largest singular value is within a factor of 2 of overflow.) Underflow is harmless if underflow is gradual. Otherwise, results may correspond to a matrix modified by perturbations of size near the underflow threshold. ===================================================================== */ ft = *f; fa = abs(ft); ht = *h__; ha = abs(*h__); /* PMAX points to the maximum absolute element of matrix PMAX = 1 if F largest in absolute values PMAX = 2 if G largest in absolute values PMAX = 3 if H largest in absolute values */ pmax = 1; swap = ha > fa; if (swap) { pmax = 3; temp = ft; ft = ht; ht = temp; temp = fa; fa = ha; ha = temp; /* Now FA .ge. HA */ } gt = *g; ga = abs(gt); if (ga == 0.) { /* Diagonal matrix */ *ssmin = ha; *ssmax = fa; clt = 1.; crt = 1.; slt = 0.; srt = 0.; } else { gasmal = TRUE_; if (ga > fa) { pmax = 2; if (fa / ga < EPSILON) { /* Case of very large GA */ gasmal = FALSE_; *ssmax = ga; if (ha > 1.) { *ssmin = fa / (ga / ha); } else { *ssmin = fa / ga * ha; } clt = 1.; slt = ht / gt; srt = 1.; crt = ft / gt; } } if (gasmal) { /* Normal case */ d__ = fa - ha; if (d__ == fa) { /* Copes with infinite F or H */ l = 1.; } else { l = d__ / fa; } /* Note that 0 .le. L .le. 1 */ m = gt / ft; /* Note that abs(M) .le. 1/macheps */ t = 2. - l; /* Note that T .ge. 1 */ mm = m * m; tt = t * t; s = sqrt(tt + mm); /* Note that 1 .le. S .le. 1 + 1/macheps */ if (l == 0.) { r__ = abs(m); } else { r__ = sqrt(l * l + mm); } /* Note that 0 .le. R .le. 1 + 1/macheps */ a = (s + r__) * .5; /* Note that 1 .le. A .le. 1 + abs(M) */ *ssmin = ha / a; *ssmax = fa * a; if (mm == 0.) { /* Note that M is very tiny */ if (l == 0.) { t = d_sign(&c_b3176, &ft) * d_sign(&c_b15, >); } else { t = gt / d_sign(&d__, &ft) + m / t; } } else { t = (m / (s + t) + m / (r__ + l)) * (a + 1.); } l = sqrt(t * t + 4.); crt = 2. / l; srt = t / l; clt = (crt + srt * m) / a; slt = ht / ft * srt / a; } } if (swap) { *csl = srt; *snl = crt; *csr = slt; *snr = clt; } else { *csl = clt; *snl = slt; *csr = crt; *snr = srt; } /* Correct signs of SSMAX and SSMIN */ if (pmax == 1) { tsign = d_sign(&c_b15, csr) * d_sign(&c_b15, csl) * d_sign(&c_b15, f); } if (pmax == 2) { tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, csl) * d_sign(&c_b15, g); } if (pmax == 3) { tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, snl) * d_sign(&c_b15, h__); } *ssmax = d_sign(ssmax, &tsign); d__1 = tsign * d_sign(&c_b15, f) * d_sign(&c_b15, h__); *ssmin = d_sign(ssmin, &d__1); return 0; /* End of DLASV2 */ } /* dlasv2_ */ /* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer *k1, integer *k2, integer *ipiv, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static doublereal temp; static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. Arguments ========= N (input) INTEGER The number of columns of the matrix A. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. 
LDA (input) INTEGER The leading dimension of the array A. K1 (input) INTEGER The first element of IPIV for which a row interchange will be done. K2 (input) INTEGER The last element of IPIV for which a row interchange will be done. IPIV (input) INTEGER array, dimension (K2*abs(INCX)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. INCX (input) INTEGER The increment between successive values of IPIV. If IPIV is negative, the pivots are applied in reverse order. Further Details =============== Modified by R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA ===================================================================== Interchange row I with row IPIV(I) for each of rows K1 through K2. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ if (*incx > 0) { ix0 = *k1; i1 = *k1; i2 = *k2; inc = 1; } else if (*incx < 0) { ix0 = (1 - *k2) * *incx + 1; i1 = *k2; i2 = *k1; inc = -1; } else { return 0; } n32 = *n / 32 << 5; if (n32 != 0) { i__1 = n32; for (j = 1; j <= i__1; j += 32) { ix = ix0; i__2 = i2; i__3 = inc; for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) { ip = ipiv[ix]; if (ip != i__) { i__4 = j + 31; for (k = j; k <= i__4; ++k) { temp = a[i__ + k * a_dim1]; a[i__ + k * a_dim1] = a[ip + k * a_dim1]; a[ip + k * a_dim1] = temp; /* L10: */ } } ix += *incx; /* L20: */ } /* L30: */ } } if (n32 != *n) { ++n32; ix = ix0; i__1 = i2; i__3 = inc; for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) { ip = ipiv[ix]; if (ip != i__) { i__2 = *n; for (k = n32; k <= i__2; ++k) { temp = a[i__ + k * a_dim1]; a[i__ + k * a_dim1] = a[ip + k * a_dim1]; a[ip + k * a_dim1] = temp; /* L40: */ } } ix += *incx; /* L50: */ } } return 0; /* End of DLASWP */ } /* dlaswp_ */ /* Subroutine */ int dlasy2_(logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info) { /* Initialized data */ static integer locu12[4] = { 3,4,1,2 }; static integer locl21[4] = { 2,1,4,3 }; static integer locu22[4] = { 4,3,2,1 }; static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; /* System generated locals */ integer b_dim1, b_offset, tl_dim1, tl_offset, tr_dim1, tr_offset, x_dim1, x_offset; doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; /* Local variables */ static doublereal btmp[4], smin; static integer ipiv; static doublereal temp; static integer jpiv[4]; static doublereal xmax; static integer ipsv, jpsv, i__, j, k; static logical bswap; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static logical xswap; static doublereal x2[2], l21, u11, u12; static integer ip, jp; static doublereal u22, t16[16] /* was [4][4] */; extern integer idamax_(integer *, doublereal *, integer *); static doublereal smlnum, gam, bet, eps, sgn, tmp[4], tau1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASY2 solves for the N1 by N2 matrix X, 1 <= N1,N2 <= 2, in op(TL)*X + ISGN*X*op(TR) = SCALE*B, where TL is N1 by N1, TR is N2 by N2, B is N1 by N2, and ISGN = 1 or -1. 
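In the smallest case, N1 = N2 = 1, this is the scalar relation ( TL(1,1) + ISGN*TR(1,1) ) * X(1,1) = SCALE * B(1,1); as an illustrative sketch (hypothetical values), TL = ( 2 ), TR = ( 3 ), ISGN = 1 and B = ( 10 ) give X = ( 2 ) with SCALE = 1.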
op(T) = T or T', where T' denotes the transpose of T. Arguments ========= LTRANL (input) LOGICAL On entry, LTRANL specifies the op(TL): = .FALSE., op(TL) = TL, = .TRUE., op(TL) = TL'. LTRANR (input) LOGICAL On entry, LTRANR specifies the op(TR): = .FALSE., op(TR) = TR, = .TRUE., op(TR) = TR'. ISGN (input) INTEGER On entry, ISGN specifies the sign of the equation as described before. ISGN may only be 1 or -1. N1 (input) INTEGER On entry, N1 specifies the order of matrix TL. N1 may only be 0, 1 or 2. N2 (input) INTEGER On entry, N2 specifies the order of matrix TR. N2 may only be 0, 1 or 2. TL (input) DOUBLE PRECISION array, dimension (LDTL,2) On entry, TL contains an N1 by N1 matrix. LDTL (input) INTEGER The leading dimension of the matrix TL. LDTL >= max(1,N1). TR (input) DOUBLE PRECISION array, dimension (LDTR,2) On entry, TR contains an N2 by N2 matrix. LDTR (input) INTEGER The leading dimension of the matrix TR. LDTR >= max(1,N2). B (input) DOUBLE PRECISION array, dimension (LDB,2) On entry, the N1 by N2 matrix B contains the right-hand side of the equation. LDB (input) INTEGER The leading dimension of the matrix B. LDB >= max(1,N1). SCALE (output) DOUBLE PRECISION On exit, SCALE contains the scale factor. SCALE is chosen less than or equal to 1 to prevent the solution overflowing. X (output) DOUBLE PRECISION array, dimension (LDX,2) On exit, X contains the N1 by N2 solution. LDX (input) INTEGER The leading dimension of the matrix X. LDX >= max(1,N1). XNORM (output) DOUBLE PRECISION On exit, XNORM is the infinity-norm of the solution. INFO (output) INTEGER On exit, INFO is set to 0: successful exit. 1: TL and TR have too close eigenvalues, so TL or TR is perturbed to get a nonsingular equation. NOTE: In the interests of speed, this routine does not check the inputs for errors. ===================================================================== */ /* Parameter adjustments */ tl_dim1 = *ldtl; tl_offset = 1 + tl_dim1 * 1; tl -= tl_offset; tr_dim1 = *ldtr; tr_offset = 1 + tr_dim1 * 1; tr -= tr_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; /* Function Body */ /* Do not check the input parameters for errors */ *info = 0; /* Quick return if possible */ if (*n1 == 0 || *n2 == 0) { return 0; } /* Set constants to control overflow */ eps = PRECISION; smlnum = SAFEMINIMUM / eps; sgn = (doublereal) (*isgn); k = *n1 + *n1 + *n2 - 2; switch (k) { case 1: goto L10; case 2: goto L20; case 3: goto L30; case 4: goto L50; } /* 1 by 1: TL11*X + SGN*X*TR11 = B11 */ L10: tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; bet = abs(tau1); if (bet <= smlnum) { tau1 = smlnum; bet = smlnum; *info = 1; } *scale = 1.; gam = (d__1 = b[b_dim1 + 1], abs(d__1)); if (smlnum * gam > bet) { *scale = 1. 
/ gam; } x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); return 0; /* 1 by 2: TL11*[X11 X12] + ISGN*[X11 X12]*op[TR11 TR12] = [B11 B12] [TR21 TR22] */ L20: /* Computing MAX Computing MAX */ d__7 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__8 = (d__2 = tr[tr_dim1 + 1] , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tr[(tr_dim1 << 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tr[ tr_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = tr[(tr_dim1 << 1) + 2], abs(d__5)); d__6 = eps * max(d__7,d__8); smin = max(d__6,smlnum); tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; if (*ltranr) { tmp[1] = sgn * tr[tr_dim1 + 2]; tmp[2] = sgn * tr[(tr_dim1 << 1) + 1]; } else { tmp[1] = sgn * tr[(tr_dim1 << 1) + 1]; tmp[2] = sgn * tr[tr_dim1 + 2]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[(b_dim1 << 1) + 1]; goto L40; /* 2 by 1: op[TL11 TL12]*[X11] + ISGN* [X11]*TR11 = [B11] [TL21 TL22] [X21] [X21] [B21] */ L30: /* Computing MAX Computing MAX */ d__7 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__8 = (d__2 = tl[tl_dim1 + 1] , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tl[(tl_dim1 << 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tl[ tl_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = tl[(tl_dim1 << 1) + 2], abs(d__5)); d__6 = eps * max(d__7,d__8); smin = max(d__6,smlnum); tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; if (*ltranl) { tmp[1] = tl[(tl_dim1 << 1) + 1]; tmp[2] = tl[tl_dim1 + 2]; } else { tmp[1] = tl[tl_dim1 + 2]; tmp[2] = tl[(tl_dim1 << 1) + 1]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[b_dim1 + 2]; L40: /* Solve 2 by 2 system using complete pivoting. Set pivots less than SMIN to SMIN. */ ipiv = idamax_(&c__4, tmp, &c__1); u11 = tmp[ipiv - 1]; if (abs(u11) <= smin) { *info = 1; u11 = smin; } u12 = tmp[locu12[ipiv - 1] - 1]; l21 = tmp[locl21[ipiv - 1] - 1] / u11; u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21; xswap = xswpiv[ipiv - 1]; bswap = bswpiv[ipiv - 1]; if (abs(u22) <= smin) { *info = 1; u22 = smin; } if (bswap) { temp = btmp[1]; btmp[1] = btmp[0] - l21 * temp; btmp[0] = temp; } else { btmp[1] -= l21 * btmp[0]; } *scale = 1.; if (smlnum * 2. * abs(btmp[1]) > abs(u22) || smlnum * 2. * abs(btmp[0]) > abs(u11)) { /* Computing MAX */ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]); *scale = .5 / max(d__1,d__2); btmp[0] *= *scale; btmp[1] *= *scale; } x2[1] = btmp[1] / u22; x2[0] = btmp[0] / u11 - u12 / u11 * x2[1]; if (xswap) { temp = x2[1]; x2[1] = x2[0]; x2[0] = temp; } x[x_dim1 + 1] = x2[0]; if (*n1 == 1) { x[(x_dim1 << 1) + 1] = x2[1]; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) + 1], abs(d__2)); } else { x[x_dim1 + 2] = x2[1]; /* Computing MAX */ d__3 = (d__1 = x[x_dim1 + 1], abs(d__1)), d__4 = (d__2 = x[x_dim1 + 2] , abs(d__2)); *xnorm = max(d__3,d__4); } return 0; /* 2 by 2: op[TL11 TL12]*[X11 X12] +ISGN* [X11 X12]*op[TR11 TR12] = [B11 B12] [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] Solve equivalent 4 by 4 system using complete pivoting. Set pivots less than SMIN to SMIN. 
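In detail, the 4 by 4 system is the Kronecker form of the 2 by 2 equation: stacking the unknowns as ( X11, X21, X12, X22 ) turns op(TL)*X + ISGN*X*op(TR) = SCALE*B into ( I2 kron op(TL) + ISGN * op(TR)' kron I2 ) * x = SCALE * b, and the array T16 below holds exactly this 4 by 4 coefficient matrix.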
*/ L50: /* Computing MAX */ d__5 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__6 = (d__2 = tr[(tr_dim1 << 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tr[ tr_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = tr[(tr_dim1 << 1) + 2], abs(d__4)); smin = max(d__5,d__6); /* Computing MAX */ d__5 = smin, d__6 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__5 = max(d__5, d__6), d__6 = (d__2 = tl[(tl_dim1 << 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tl[tl_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = tl[(tl_dim1 << 1) + 2], abs(d__4)) ; smin = max(d__5,d__6); /* Computing MAX */ d__1 = eps * smin; smin = max(d__1,smlnum); btmp[0] = 0.; dcopy_(&c__16, btmp, &c__0, t16, &c__1); t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2]; if (*ltranl) { t16[4] = tl[tl_dim1 + 2]; t16[1] = tl[(tl_dim1 << 1) + 1]; t16[14] = tl[tl_dim1 + 2]; t16[11] = tl[(tl_dim1 << 1) + 1]; } else { t16[4] = tl[(tl_dim1 << 1) + 1]; t16[1] = tl[tl_dim1 + 2]; t16[14] = tl[(tl_dim1 << 1) + 1]; t16[11] = tl[tl_dim1 + 2]; } if (*ltranr) { t16[8] = sgn * tr[(tr_dim1 << 1) + 1]; t16[13] = sgn * tr[(tr_dim1 << 1) + 1]; t16[2] = sgn * tr[tr_dim1 + 2]; t16[7] = sgn * tr[tr_dim1 + 2]; } else { t16[8] = sgn * tr[tr_dim1 + 2]; t16[13] = sgn * tr[tr_dim1 + 2]; t16[2] = sgn * tr[(tr_dim1 << 1) + 1]; t16[7] = sgn * tr[(tr_dim1 << 1) + 1]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[b_dim1 + 2]; btmp[2] = b[(b_dim1 << 1) + 1]; btmp[3] = b[(b_dim1 << 1) + 2]; /* Perform elimination */ for (i__ = 1; i__ <= 3; ++i__) { xmax = 0.; for (ip = i__; ip <= 4; ++ip) { for (jp = i__; jp <= 4; ++jp) { if ((d__1 = t16[ip + (jp << 2) - 5], abs(d__1)) >= xmax) { xmax = (d__1 = t16[ip + (jp << 2) - 5], abs(d__1)); ipsv = ip; jpsv = jp; } /* L60: */ } /* L70: */ } if (ipsv != i__) { dswap_(&c__4, &t16[ipsv - 1], &c__4, &t16[i__ - 1], &c__4); temp = btmp[i__ - 1]; btmp[i__ - 1] = btmp[ipsv - 1]; btmp[ipsv - 1] = temp; } if (jpsv != i__) { dswap_(&c__4, &t16[(jpsv << 2) - 4], &c__1, &t16[(i__ << 2) - 4], &c__1); } jpiv[i__ - 1] = jpsv; if ((d__1 = t16[i__ + (i__ << 2) - 5], abs(d__1)) < smin) { *info = 1; t16[i__ + (i__ << 2) - 5] = smin; } for (j = i__ + 1; j <= 4; ++j) { t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5]; btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1]; for (k = i__ + 1; k <= 4; ++k) { t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + ( k << 2) - 5]; /* L80: */ } /* L90: */ } /* L100: */ } if (abs(t16[15]) < smin) { t16[15] = smin; } *scale = 1.; if (smlnum * 8. * abs(btmp[0]) > abs(t16[0]) || smlnum * 8. * abs(btmp[1]) > abs(t16[5]) || smlnum * 8. * abs(btmp[2]) > abs(t16[10]) || smlnum * 8. * abs(btmp[3]) > abs(t16[15])) { /* Computing MAX */ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]), d__1 = max(d__1,d__2), d__2 = abs(btmp[2]), d__1 = max(d__1,d__2), d__2 = abs(btmp[3]); *scale = .125 / max(d__1,d__2); btmp[0] *= *scale; btmp[1] *= *scale; btmp[2] *= *scale; btmp[3] *= *scale; } for (i__ = 1; i__ <= 4; ++i__) { k = 5 - i__; temp = 1. 
/ t16[k + (k << 2) - 5]; tmp[k - 1] = btmp[k - 1] * temp; for (j = k + 1; j <= 4; ++j) { tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1]; /* L110: */ } /* L120: */ } for (i__ = 1; i__ <= 3; ++i__) { if (jpiv[4 - i__ - 1] != 4 - i__) { temp = tmp[4 - i__ - 1]; tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1]; tmp[jpiv[4 - i__ - 1] - 1] = temp; } /* L130: */ } x[x_dim1 + 1] = tmp[0]; x[x_dim1 + 2] = tmp[1]; x[(x_dim1 << 1) + 1] = tmp[2]; x[(x_dim1 << 1) + 2] = tmp[3]; /* Computing MAX */ d__1 = abs(tmp[0]) + abs(tmp[2]), d__2 = abs(tmp[1]) + abs(tmp[3]); *xnorm = max(d__1,d__2); return 0; /* End of DLASY2 */ } /* dlasy2_ */ /* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal * a, integer *lda, doublereal *e, doublereal *tau, doublereal *w, integer *ldw) { /* System generated locals */ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i__; static doublereal alpha; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dsymv_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static integer iw; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLATRD reduces NB rows and columns of a real symmetric matrix A to symmetric tridiagonal form by an orthogonal similarity transformation Q' * A * Q, and returns the matrices V and W which are needed to apply the transformation to the unreduced part of A. If UPLO = 'U', DLATRD reduces the last NB rows and columns of a matrix, of which the upper triangle is supplied; if UPLO = 'L', DLATRD reduces the first NB rows and columns of a matrix, of which the lower triangle is supplied. This is an auxiliary routine called by DSYTRD. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored: = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. NB (input) INTEGER The number of rows and columns to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. 
On exit: if UPLO = 'U', the last NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements above the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the first NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). E (output) DOUBLE PRECISION array, dimension (N-1) If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal elements of the last NB columns of the reduced matrix; if UPLO = 'L', E(1:nb) contains the subdiagonal elements of the first NB columns of the reduced matrix. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors, stored in TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. See Further Details. W (output) DOUBLE PRECISION array, dimension (LDW,NB) The n-by-nb matrix W required to update the unreduced part of A. LDW (input) INTEGER The leading dimension of the array W. LDW >= max(1,N). Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n) H(n-1) . . . H(n-nb+1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), and tau in TAU(i-1). If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), and tau in TAU(i). The elements of the vectors v together form the n-by-nb matrix V which is needed, with W, to apply the transformation to the unreduced part of the matrix, using a symmetric rank-2k update of the form: A := A - V*W' - W*V'. The contents of A on exit are illustrated by the following examples with n = 5 and nb = 2: if UPLO = 'U': if UPLO = 'L': ( a a a v4 v5 ) ( d ) ( a a v4 v5 ) ( 1 d ) ( a 1 v5 ) ( v1 1 a ) ( d 1 ) ( v1 v2 a a ) ( d ) ( v1 v2 a a a ) where d denotes a diagonal element of the reduced matrix, a denotes an element of the original matrix that is unchanged, and vi denotes an element of the vector defining H(i).
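As an illustrative usage sketch (a hypothetical caller in the style of DSYTRD; the arrays a, e, tau and w are assumed to be allocated by the caller):

    integer n = 8, nb = 2, lda = 8, ldw = 8;
    dlatrd_("L", &n, &nb, a, &lda, e, tau, w, &ldw);

after which the trailing part of A is updated with the rank-2k product A := A - V*W' - W*V' (in LAPACK, via DSYR2K) before the next block of columns is reduced.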
===================================================================== Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --e; --tau; w_dim1 = *ldw; w_offset = 1 + w_dim1 * 1; w -= w_offset; /* Function Body */ if (*n <= 0) { return 0; } if (lsame_(uplo, "U")) { /* Reduce last NB columns of upper triangle */ i__1 = *n - *nb + 1; for (i__ = *n; i__ >= i__1; --i__) { iw = i__ - *n + *nb; if (i__ < *n) { /* Update A(1:i,i) */ i__2 = *n - i__; dgemv_("No transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & c_b15, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; dgemv_("No transpose", &i__, &i__2, &c_b151, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b15, &a[i__ * a_dim1 + 1], &c__1); } if (i__ > 1) { /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */ i__2 = i__ - 1; dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - 1]); e[i__ - 1] = a[i__ - 1 + i__ * a_dim1]; a[i__ - 1 + i__ * a_dim1] = 1.; /* Compute W(1:i-1,i) */ i__2 = i__ - 1; dsymv_("Upper", &i__2, &c_b15, &a[a_offset], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b29, &w[iw * w_dim1 + 1], & c__1); if (i__ < *n) { i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, & c_b29, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, & c_b29, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[(iw + 1) * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); } i__2 = i__ - 1; dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &c__1); i__2 = i__ - 1; daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * w_dim1 + 1], &c__1); } /* L10: */ } } else { /* Reduce first NB columns of lower triangle */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:n,i) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], lda, &w[i__ + w_dim1], ldw, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + w_dim1], ldw, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); if (i__ < *n) { /* Generate elementary reflector H(i) to annihilate A(i+2:n,i) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute W(i+1:n,i) */ i__2 = *n - i__; dsymv_("Lower", &i__2, &c_b15, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b29, &w[i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[i__ + 1 + w_dim1] , ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &w[i__ * w_dim1 + 1], 
&c__1, &c_b15, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + 1 + w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *n - i__; daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ i__ + 1 + i__ * w_dim1], &c__1); } /* L20: */ } } return 0; /* End of DLATRD */ } /* dlatrd_ */ /* Subroutine */ int dlazq3_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, doublereal *qmax, integer *nfail, integer *iter, integer *ndiv, logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal temp, g, s, t; static integer j4; extern /* Subroutine */ int dlasq5_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, logical *), dlasq6_( integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlazq4_(integer *, integer *, doublereal *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *); static integer nn; static doublereal safmin, eps, tol; static integer n0in, ipn4; static doublereal tol2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAZQ3 checks for deflation, computes a shift (TAU) and calls dqds. In case of failure it changes shifts, and tries again until output is positive. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. PP (input) INTEGER PP=0 for ping, PP=1 for pong. DMIN (output) DOUBLE PRECISION Minimum value of d. SIGMA (output) DOUBLE PRECISION Sum of shifts used in current segment. DESIG (input/output) DOUBLE PRECISION Lower order part of SIGMA QMAX (input) DOUBLE PRECISION Maximum value of q. NFAIL (output) INTEGER Number of times shift was too big. ITER (output) INTEGER Number of iterations. NDIV (output) INTEGER Number of divisions. IEEE (input) LOGICAL Flag for IEEE or non IEEE arithmetic (passed to DLASQ5). TTYPE (input/output) INTEGER Shift type. TTYPE is passed as an argument in order to save its value between calls to DLAZQ3 DMIN1 (input/output) REAL DMIN2 (input/output) REAL DN (input/output) REAL DN1 (input/output) REAL DN2 (input/output) REAL TAU (input/output) REAL These are passed as arguments in order to save their values between calls to DLAZQ3 This is a thread safe version of DLASQ3, which passes TTYPE, DMIN1, DMIN2, DN, DN1. DN2 and TAU through the argument list in place of declaring them in a SAVE statment. 
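In outline: the code below first deflates converged eigenvalues at the bottom of the segment (the negligibility tests on E(N0-1) and E(N0-2)), then asks DLAZQ4 for a shift TAU, runs one shifted dqds sweep via DLASQ5, and on failure either retries with a reduced or zero shift or falls back to the unshifted, underflow-safe sweep DLASQ6.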
===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ n0in = *n0; eps = PRECISION; safmin = SAFEMINIMUM; tol = eps * 100.; /* Computing 2nd power */ d__1 = tol; tol2 = d__1 * d__1; g = 0.; /* Check for deflation. */ L10: if (*n0 < *i0) { return 0; } if (*n0 == *i0) { goto L20; } nn = (*n0 << 2) + *pp; if (*n0 == *i0 + 1) { goto L40; } /* Check whether E(N0-1) is negligible, 1 eigenvalue. */ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) - 4] > tol2 * z__[nn - 7]) { goto L30; } L20: z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma; --(*n0); goto L10; /* Check whether E(N0-2) is negligible, 2 eigenvalues. */ L30: if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[ nn - 11]) { goto L50; } L40: if (z__[nn - 3] > z__[nn - 7]) { s = z__[nn - 3]; z__[nn - 3] = z__[nn - 7]; z__[nn - 7] = s; } if (z__[nn - 5] > z__[nn - 3] * tol2) { t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5; s = z__[nn - 3] * (z__[nn - 5] / t); if (s <= t) { s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.))); } else { s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s))); } t = z__[nn - 7] + (s + z__[nn - 5]); z__[nn - 3] *= z__[nn - 7] / t; z__[nn - 7] = t; } z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma; z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma; *n0 += -2; goto L10; L50: /* Reverse the qd-array, if warranted. */ if (*dmin__ <= 0. || *n0 < n0in) { if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) { ipn4 = *i0 + *n0 << 2; i__1 = *i0 + *n0 - 1 << 1; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { temp = z__[j4 - 3]; z__[j4 - 3] = z__[ipn4 - j4 - 3]; z__[ipn4 - j4 - 3] = temp; temp = z__[j4 - 2]; z__[j4 - 2] = z__[ipn4 - j4 - 2]; z__[ipn4 - j4 - 2] = temp; temp = z__[j4 - 1]; z__[j4 - 1] = z__[ipn4 - j4 - 5]; z__[ipn4 - j4 - 5] = temp; temp = z__[j4]; z__[j4] = z__[ipn4 - j4 - 4]; z__[ipn4 - j4 - 4] = temp; /* L60: */ } if (*n0 - *i0 <= 4) { z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1]; z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp]; } /* Computing MIN */ d__1 = *dmin2, d__2 = z__[(*n0 << 2) + *pp - 1]; *dmin2 = min(d__1,d__2); /* Computing MIN */ d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1] , d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) + *pp + 3]; z__[(*n0 << 2) + *pp - 1] = min(d__1,d__2); /* Computing MIN */ d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) - *pp + 4]; z__[(*n0 << 2) - *pp] = min(d__1,d__2); /* Computing MAX */ d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = max(d__1, d__2), d__2 = z__[(*i0 << 2) + *pp + 1]; *qmax = max(d__1,d__2); *dmin__ = 0.; } } /* Computing MIN */ d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*n0 << 2) + *pp - 9], d__1 = min(d__1,d__2), d__2 = *dmin2 + z__[(*n0 << 2) - *pp]; if (*dmin__ < 0. || safmin * *qmax < min(d__1,d__2)) { /* Choose a shift. */ dlazq4_(i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2, tau, ttype, &g); /* Call dqds until DMIN > 0. */ L80: dlasq5_(i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2, ieee); *ndiv += *n0 - *i0 + 2; ++(*iter); /* Check status. */ if (*dmin__ >= 0. && *dmin1 > 0.) { /* Success. */ goto L100; } else if (*dmin__ < 0. && *dmin1 > 0. && z__[(*n0 - 1 << 2) - *pp] < tol * (*sigma + *dn1) && abs(*dn) < tol * *sigma) { /* Convergence hidden by negative DN. */ z__[(*n0 - 1 << 2) - *pp + 2] = 0.; *dmin__ = 0.; goto L100; } else if (*dmin__ < 0.) { /* TAU too big. 
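(DMIN came back negative, i.e. the shift overshot the smallest eigenvalue.)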
Select new TAU and try again. */ ++(*nfail); if (*ttype < -22) { /* Failed twice. Play it safe. */ *tau = 0.; } else if (*dmin1 > 0.) { /* Late failure. Gives excellent shift. */ *tau = (*tau + *dmin__) * (1. - eps * 2.); *ttype += -11; } else { /* Early failure. Divide by 4. */ *tau *= .25; *ttype += -12; } goto L80; } else if (*dmin__ != *dmin__) { /* NaN. */ *tau = 0.; goto L80; } else { /* Possible underflow. Play it safe. */ goto L90; } } /* Risk of underflow. */ L90: dlasq6_(i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2); *ndiv += *n0 - *i0 + 2; ++(*iter); *tau = 0.; L100: if (*tau < *sigma) { *desig += *tau; t = *sigma + *desig; *desig -= t - *sigma; } else { t = *sigma + *tau; *desig = *sigma - (t - *tau) + *desig; } *sigma = t; return 0; /* End of DLAZQ3 */ } /* dlazq3_ */ /* Subroutine */ int dlazq4_(integer *i0, integer *n0, doublereal *z__, integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau, integer *ttype, doublereal *g) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal s, a2, b1, b2; static integer i4, nn, np; static doublereal gam, gap1, gap2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAZQ4 computes an approximation TAU to the smallest eigenvalue using values of d from the previous transform. I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. PP (input) INTEGER PP=0 for ping, PP=1 for pong. N0IN (input) INTEGER The value of N0 at start of EIGTEST. DMIN (input) DOUBLE PRECISION Minimum value of d. DMIN1 (input) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (input) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (input) DOUBLE PRECISION d(N) DN1 (input) DOUBLE PRECISION d(N-1) DN2 (input) DOUBLE PRECISION d(N-2) TAU (output) DOUBLE PRECISION This is the shift. TTYPE (output) INTEGER Shift type. G (input/output) DOUBLE PRECISION G is passed as an argument in order to save its value between calls to DLAZQ4 Further Details =============== CNST1 = 9/16 This is a thread safe version of DLASQ4, which passes G through the argument list in place of declaring G in a SAVE statment. ===================================================================== A negative DMIN forces the shift to take that absolute value TTYPE records the type of shift. */ /* Parameter adjustments */ --z__; /* Function Body */ if (*dmin__ <= 0.) { *tau = -(*dmin__); *ttype = -1; return 0; } nn = (*n0 << 2) + *pp; if (*n0in == *n0) { /* No eigenvalues deflated. */ if (*dmin__ == *dn || *dmin__ == *dn1) { b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]); b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]); a2 = z__[nn - 7] + z__[nn - 5]; /* Cases 2 and 3. */ if (*dmin__ == *dn && *dmin1 == *dn1) { gap2 = *dmin2 - a2 - *dmin2 * .25; if (gap2 > 0. && gap2 > b2) { gap1 = a2 - *dn - b2 / gap2 * b2; } else { gap1 = a2 - *dn - (b1 + b2); } if (gap1 > 0. 
&& gap1 > b1) { /* Computing MAX */ d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5; s = max(d__1,d__2); *ttype = -2; } else { s = 0.; if (*dn > b1) { s = *dn - b1; } if (a2 > b1 + b2) { /* Computing MIN */ d__1 = s, d__2 = a2 - (b1 + b2); s = min(d__1,d__2); } /* Computing MAX */ d__1 = s, d__2 = *dmin__ * .333; s = max(d__1,d__2); *ttype = -3; } } else { /* Case 4. */ *ttype = -4; s = *dmin__ * .25; if (*dmin__ == *dn) { gam = *dn; a2 = 0.; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b2 = z__[nn - 5] / z__[nn - 7]; np = nn - 9; } else { np = nn - (*pp << 1); b2 = z__[np - 2]; gam = *dn1; if (z__[np - 4] > z__[np - 2]) { return 0; } a2 = z__[np - 4] / z__[np - 2]; if (z__[nn - 9] > z__[nn - 11]) { return 0; } b2 = z__[nn - 9] / z__[nn - 11]; np = nn - 13; } /* Approximate contribution to norm squared from I < NN-1. */ a2 += b2; i__1 = (*i0 << 2) - 1 + *pp; for (i4 = np; i4 >= i__1; i4 += -4) { if (b2 == 0.) { goto L20; } b1 = b2; if (z__[i4] > z__[i4 - 2]) { return 0; } b2 *= z__[i4] / z__[i4 - 2]; a2 += b2; if (max(b2,b1) * 100. < a2 || .563 < a2) { goto L20; } /* L10: */ } L20: a2 *= 1.05; /* Rayleigh quotient residual bound. */ if (a2 < .563) { s = gam * (1. - sqrt(a2)) / (a2 + 1.); } } } else if (*dmin__ == *dn2) { /* Case 5. */ *ttype = -5; s = *dmin__ * .25; /* Compute contribution to norm squared from I > NN-2. */ np = nn - (*pp << 1); b1 = z__[np - 2]; b2 = z__[np - 6]; gam = *dn2; if (z__[np - 8] > b2 || z__[np - 4] > b1) { return 0; } a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.); /* Approximate contribution to norm squared from I < NN-2. */ if (*n0 - *i0 > 2) { b2 = z__[nn - 13] / z__[nn - 15]; a2 += b2; i__1 = (*i0 << 2) - 1 + *pp; for (i4 = nn - 17; i4 >= i__1; i4 += -4) { if (b2 == 0.) { goto L40; } b1 = b2; if (z__[i4] > z__[i4 - 2]) { return 0; } b2 *= z__[i4] / z__[i4 - 2]; a2 += b2; if (max(b2,b1) * 100. < a2 || .563 < a2) { goto L40; } /* L30: */ } L40: a2 *= 1.05; } if (a2 < .563) { s = gam * (1. - sqrt(a2)) / (a2 + 1.); } } else { /* Case 6, no information to guide us. */ if (*ttype == -6) { *g += (1. - *g) * .333; } else if (*ttype == -18) { *g = .083250000000000005; } else { *g = .25; } s = *g * *dmin__; *ttype = -6; } } else if (*n0in == *n0 + 1) { /* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */ if (*dmin1 == *dn1 && *dmin2 == *dn2) { /* Cases 7 and 8. */ *ttype = -7; s = *dmin1 * .333; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b1 = z__[nn - 5] / z__[nn - 7]; b2 = b1; if (b2 == 0.) { goto L60; } i__1 = (*i0 << 2) - 1 + *pp; for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { a2 = b1; if (z__[i4] > z__[i4 - 2]) { return 0; } b1 *= z__[i4] / z__[i4 - 2]; b2 += b1; if (max(b1,a2) * 100. < b2) { goto L60; } /* L50: */ } L60: b2 = sqrt(b2 * 1.05); /* Computing 2nd power */ d__1 = b2; a2 = *dmin1 / (d__1 * d__1 + 1.); gap2 = *dmin2 * .5 - a2; if (gap2 > 0. && gap2 > b2 * a2) { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); s = max(d__1,d__2); } else { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - b2 * 1.01); s = max(d__1,d__2); *ttype = -8; } } else { /* Case 9. */ s = *dmin1 * .25; if (*dmin1 == *dn1) { s = *dmin1 * .5; } *ttype = -9; } } else if (*n0in == *n0 + 2) { /* Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. Cases 10 and 11. */ if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) { *ttype = -10; s = *dmin2 * .333; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b1 = z__[nn - 5] / z__[nn - 7]; b2 = b1; if (b2 == 0.) 
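/* first ratio is already zero, so there is nothing to accumulate; skip the scan */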
{ goto L80; } i__1 = (*i0 << 2) - 1 + *pp; for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { if (z__[i4] > z__[i4 - 2]) { return 0; } b1 *= z__[i4] / z__[i4 - 2]; b2 += b1; if (b1 * 100. < b2) { goto L80; } /* L70: */ } L80: b2 = sqrt(b2 * 1.05); /* Computing 2nd power */ d__1 = b2; a2 = *dmin2 / (d__1 * d__1 + 1.); gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[ nn - 9]) - a2; if (gap2 > 0. && gap2 > b2 * a2) { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); s = max(d__1,d__2); } else { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - b2 * 1.01); s = max(d__1,d__2); } } else { s = *dmin2 * .25; *ttype = -11; } } else if (*n0in > *n0 + 2) { /* Case 12, more than two eigenvalues deflated. No information. */ s = 0.; *ttype = -12; } *tau = s; return 0; /* End of DLAZQ4 */ } /* dlazq4_ */ /* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1; /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORG2R generates an m by n real matrix Q with orthonormal columns, which is defined as the first n columns of a product of k elementary reflectors of order m Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. M >= N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. N >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. On exit, the m-by-n matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. 
WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0 || *n > *m) { *info = -2; } else if (*k < 0 || *k > *n) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DORG2R", &i__1); return 0; } /* Quick return if possible */ if (*n <= 0) { return 0; } /* Initialise columns k+1:n to columns of the unit matrix */ i__1 = *n; for (j = *k + 1; j <= i__1; ++j) { i__2 = *m; for (l = 1; l <= i__2; ++l) { a[l + j * a_dim1] = 0.; /* L10: */ } a[j + j * a_dim1] = 1.; /* L20: */ } for (i__ = *k; i__ >= 1; --i__) { /* Apply H(i) to A(i:m,i:n) from the left */ if (i__ < *n) { a[i__ + i__ * a_dim1] = 1.; i__1 = *m - i__ + 1; i__2 = *n - i__; dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); } if (i__ < *m) { i__1 = *m - i__; d__1 = -tau[i__]; dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1); } a[i__ + i__ * a_dim1] = 1. - tau[i__]; /* Set A(1:i-1,i) to zero */ i__1 = i__ - 1; for (l = 1; l <= i__1; ++l) { a[l + i__ * a_dim1] = 0.; /* L30: */ } /* L40: */ } return 0; /* End of DORG2R */ } /* dorg2r_ */ /* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); static integer iinfo; static logical wantq; static integer nb, mn; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dorglq_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGBR generates one of the real orthogonal matrices Q or P**T determined by DGEBRD when reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and P**T are defined as products of elementary reflectors H(i) or G(i) respectively. If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q is of order M: if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n columns of Q, where m >= n >= k; if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an M-by-M matrix. If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T is of order N: if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m rows of P**T, where n >= m >= k; if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as an N-by-N matrix. Arguments ========= VECT (input) CHARACTER*1 Specifies whether the matrix Q or the matrix P**T is required, as defined in the transformation applied by DGEBRD: = 'Q': generate Q; = 'P': generate P**T. M (input) INTEGER The number of rows of the matrix Q or P**T to be returned. 
M >= 0. N (input) INTEGER The number of columns of the matrix Q or P**T to be returned. N >= 0. If VECT = 'Q', M >= N >= min(M,K); if VECT = 'P', N >= M >= min(N,K). K (input) INTEGER If VECT = 'Q', the number of columns in the original M-by-K matrix reduced by DGEBRD. If VECT = 'P', the number of rows in the original K-by-N matrix reduced by DGEBRD. K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the vectors which define the elementary reflectors, as returned by DGEBRD. On exit, the M-by-N matrix Q or P**T. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (min(M,K)) if VECT = 'Q' (min(N,K)) if VECT = 'P' TAU(i) must contain the scalar factor of the elementary reflector H(i) or G(i), which determines Q or P**T, as returned by DGEBRD in its array argument TAUQ or TAUP. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,min(M,N)). For optimum performance LWORK >= min(M,N)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; wantq = lsame_(vect, "Q"); mn = min(*m,*n); lquery = *lwork == -1; if (! wantq && ! lsame_(vect, "P")) { *info = -1; } else if (*m < 0) { *info = -2; } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && ( *m > *n || *m < min(*n,*k))) { *info = -3; } else if (*k < 0) { *info = -4; } else if (*lda < max(1,*m)) { *info = -6; } else if (*lwork < max(1,mn) && ! 
lquery) { *info = -9; } if (*info == 0) { if (wantq) { nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, ( ftnlen)1); } else { nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, ( ftnlen)1); } lwkopt = max(1,mn) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGBR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { work[1] = 1.; return 0; } if (wantq) { /* Form Q, determined by a call to DGEBRD to reduce an m-by-k matrix */ if (*m >= *k) { /* If m >= k, assume m >= n >= k */ dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & iinfo); } else { /* If m < k, assume m = n Shift the vectors which define the elementary reflectors one column to the right, and set the first row and column of Q to those of the unit matrix */ for (j = *m; j >= 2; --j) { a[j * a_dim1 + 1] = 0.; i__1 = *m; for (i__ = j + 1; i__ <= i__1; ++i__) { a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; /* L10: */ } /* L20: */ } a[a_dim1 + 1] = 1.; i__1 = *m; for (i__ = 2; i__ <= i__1; ++i__) { a[i__ + a_dim1] = 0.; /* L30: */ } if (*m > 1) { /* Form Q(2:m,2:m) */ i__1 = *m - 1; i__2 = *m - 1; i__3 = *m - 1; dorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[ 1], &work[1], lwork, &iinfo); } } } else { /* Form P', determined by a call to DGEBRD to reduce a k-by-n matrix */ if (*k < *n) { /* If k < n, assume k <= m <= n */ dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & iinfo); } else { /* If k >= n, assume m = n Shift the vectors which define the elementary reflectors one row downward, and set the first row and column of P' to those of the unit matrix */ a[a_dim1 + 1] = 1.; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { a[i__ + a_dim1] = 0.; /* L40: */ } i__1 = *n; for (j = 2; j <= i__1; ++j) { for (i__ = j - 1; i__ >= 2; --i__) { a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1]; /* L50: */ } a[j * a_dim1 + 1] = 0.; /* L60: */ } if (*n > 1) { /* Form P'(2:n,2:n) */ i__1 = *n - 1; i__2 = *n - 1; i__3 = *n - 1; dorglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[ 1], &work[1], lwork, &iinfo); } } } work[1] = (doublereal) lwkopt; return 0; /* End of DORGBR */ } /* dorgbr_ */ /* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer i__, j, iinfo, nb, nh; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGHR generates a real orthogonal matrix Q which is defined as the product of IHI-ILO elementary reflectors of order N, as returned by DGEHRD: Q = H(ilo) H(ilo+1) . . . H(ihi-1). Arguments ========= N (input) INTEGER The order of the matrix Q. N >= 0. ILO (input) INTEGER IHI (input) INTEGER ILO and IHI must have the same values as in the previous call of DGEHRD. Q is equal to the unit matrix except in the submatrix Q(ilo+1:ihi,ilo+1:ihi). 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the vectors which define the elementary reflectors, as returned by DGEHRD. On exit, the N-by-N orthogonal matrix Q. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (N-1) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEHRD. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= IHI-ILO. For optimum performance LWORK >= (IHI-ILO)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nh = *ihi - *ilo; lquery = *lwork == -1; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*lwork < max(1,nh) && ! lquery) { *info = -8; } if (*info == 0) { nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, ( ftnlen)1); lwkopt = max(1,nh) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGHR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { work[1] = 1.; return 0; } /* Shift the vectors which define the elementary reflectors one column to the right, and set the first ilo and the last n-ihi rows and columns to those of the unit matrix */ i__1 = *ilo + 1; for (j = *ihi; j >= i__1; --j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } i__2 = *ihi; for (i__ = j + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; /* L20: */ } i__2 = *n; for (i__ = *ihi + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } i__1 = *ilo; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L50: */ } a[j + j * a_dim1] = 1.; /* L60: */ } i__1 = *n; for (j = *ihi + 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L70: */ } a[j + j * a_dim1] = 1.; /* L80: */ } if (nh > 0) { /* Generate Q(ilo+1:ihi,ilo+1:ihi) */ dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[* ilo], &work[1], lwork, &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DORGHR */ } /* dorghr_ */ /* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1; /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. 
of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGL2 generates an m by n real matrix Q with orthonormal rows, which is defined as the first m rows of a product of k elementary reflectors of order n Q = H(k) . . . H(2) H(1) as returned by DGELQF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. N >= M. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. M >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. On exit, the m-by-n matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. WORK (workspace) DOUBLE PRECISION array, dimension (M) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < *m) { *info = -2; } else if (*k < 0 || *k > *m) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGL2", &i__1); return 0; } /* Quick return if possible */ if (*m <= 0) { return 0; } if (*k < *m) { /* Initialise rows k+1:m to rows of the unit matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (l = *k + 1; l <= i__2; ++l) { a[l + j * a_dim1] = 0.; /* L10: */ } if (j > *k && j <= *m) { a[j + j * a_dim1] = 1.; } /* L20: */ } } for (i__ = *k; i__ >= 1; --i__) { /* Apply H(i) to A(i:m,i:n) from the right */ if (i__ < *n) { if (i__ < *m) { a[i__ + i__ * a_dim1] = 1.; i__1 = *m - i__; i__2 = *n - i__ + 1; dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, & tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); } i__1 = *n - i__; d__1 = -tau[i__]; dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda); } a[i__ + i__ * a_dim1] = 1. 
- tau[i__]; /* Set A(i,1:i-1) to zero */ i__1 = i__ - 1; for (l = 1; l <= i__1; ++l) { a[i__ + l * a_dim1] = 0.; /* L30: */ } /* L40: */ } return 0; /* End of DORGL2 */ } /* dorgl2_ */ /* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j, l, nbmin, iinfo; extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb, ki, kk; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGLQ generates an M-by-N real matrix Q with orthonormal rows, which is defined as the first M rows of a product of K elementary reflectors of order N Q = H(k) . . . H(2) H(1) as returned by DGELQF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. N >= M. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. M >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. On exit, the M-by-N matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,M). For optimum performance LWORK >= M*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = max(1,*m) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < *m) { *info = -2; } else if (*k < 0 || *k > *m) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*lwork < max(1,*m) && ! 
lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGLQ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m <= 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *m; if (nb > 1 && nb < *k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *m; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < *k && nx < *k) { /* Use blocked code after the last block. The first kk rows are handled by the block method. */ ki = (*k - nx - 1) / nb * nb; /* Computing MIN */ i__1 = *k, i__2 = ki + nb; kk = min(i__1,i__2); /* Set A(kk+1:m,1:kk) to zero. */ i__1 = kk; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = kk + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } /* L20: */ } } else { kk = 0; } /* Use unblocked code for the last or only block. */ if (kk < *m) { i__1 = *m - kk; i__2 = *n - kk; i__3 = *k - kk; dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & tau[kk + 1], &work[1], &iinfo); } if (kk > 0) { /* Use blocked code */ i__1 = -nb; for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { /* Computing MIN */ i__2 = nb, i__3 = *k - i__ + 1; ib = min(i__2,i__3); if (i__ + ib <= *m) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__2 = *n - i__ + 1; dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H' to A(i+ib:m,i:n) from the right */ i__2 = *m - i__ - ib + 1; i__3 = *n - i__ + 1; dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, & i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + 1], &ldwork); } /* Apply H' to columns i:n of current block */ i__2 = *n - i__ + 1; dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & work[1], &iinfo); /* Set columns 1:i-1 of current block to zero */ i__2 = i__ - 1; for (j = 1; j <= i__2; ++j) { i__3 = i__ + ib - 1; for (l = i__; l <= i__3; ++l) { a[l + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } /* L50: */ } } work[1] = (doublereal) iws; return 0; /* End of DORGLQ */ } /* dorglq_ */ /* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j, l, nbmin, iinfo; extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb, ki, kk; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, 
integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGQR generates an M-by-N real matrix Q with orthonormal columns, which is defined as the first N columns of a product of K elementary reflectors of order M Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. M >= N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. N >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. On exit, the M-by-N matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = max(1,*n) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0 || *n > *m) { *info = -2; } else if (*k < 0 || *k > *n) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*lwork < max(1,*n) && ! lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGQR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n <= 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *n; if (nb > 1 && nb < *k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < *k && nx < *k) { /* Use blocked code after the last block. The first kk columns are handled by the block method. */ ki = (*k - nx - 1) / nb * nb; /* Computing MIN */ i__1 = *k, i__2 = ki + nb; kk = min(i__1,i__2); /* Set A(1:kk,kk+1:n) to zero. 
*/ i__1 = *n; for (j = kk + 1; j <= i__1; ++j) { i__2 = kk; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } /* L20: */ } } else { kk = 0; } /* Use unblocked code for the last or only block. */ if (kk < *n) { i__1 = *m - kk; i__2 = *n - kk; i__3 = *k - kk; dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & tau[kk + 1], &work[1], &iinfo); } if (kk > 0) { /* Use blocked code */ i__1 = -nb; for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { /* Computing MIN */ i__2 = nb, i__3 = *k - i__ + 1; ib = min(i__2,i__3); if (i__ + ib <= *n) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__2 = *m - i__ + 1; dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H to A(i:m,i+ib:n) from the left */ i__2 = *m - i__ + 1; i__3 = *n - i__ - ib + 1; dlarfb_("Left", "No transpose", "Forward", "Columnwise", & i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, & work[ib + 1], &ldwork); } /* Apply H to rows i:m of current block */ i__2 = *m - i__ + 1; dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & work[1], &iinfo); /* Set rows 1:i-1 of current block to zero */ i__2 = i__ + ib - 1; for (j = i__; j <= i__2; ++j) { i__3 = i__ - 1; for (l = 1; l <= i__3; ++l) { a[l + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } /* L50: */ } } work[1] = (doublereal) iws; return 0; /* End of DORGQR */ } /* dorgqr_ */ /* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORM2L overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQLF in the last k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. 
If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQLF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORM2L", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; } else { mi = *m; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(1:m-k+i,1:n) */ mi = *m - *k + i__; } else { /* H(i) is applied to C(1:m,1:n-k+i) */ ni = *n - *k + i__; } /* Apply H(i) */ aii = a[nq - *k + i__ + i__ * a_dim1]; a[nq - *k + i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[ c_offset], ldc, &work[1]); a[nq - *k + i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORM2L */ } /* dorm2l_ */ /* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, ic, jc, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORM2R overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. 
Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORM2R", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && ! notran || ! left && notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H(i) is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H(i) */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[ ic + jc * c_dim1], ldc, &work[1]); a[i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORM2R */ } /* dorm2r_ */ /* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; extern logical lsame_(char *, char *); static integer iinfo, i1, i2, nb, mi, ni, nq, nw; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static logical notran; extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static logical applyq; static char transt[1]; static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': P * C C * P TRANS = 'T': P**T * C C * P**T Here Q and P**T are the orthogonal matrices determined by DGEBRD when reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and P**T are defined as products of elementary reflectors H(i) and G(i) respectively. Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the order of the orthogonal matrix Q or P**T that is applied. If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: if nq >= k, Q = H(1) H(2) . . . H(k); if nq < k, Q = H(1) H(2) . . . H(nq-1). If VECT = 'P', A is assumed to have been a K-by-NQ matrix: if k < nq, P = G(1) G(2) . . . G(k); if k >= nq, P = G(1) G(2) . . . G(nq-1). Arguments ========= VECT (input) CHARACTER*1 = 'Q': apply Q or Q**T; = 'P': apply P or P**T. SIDE (input) CHARACTER*1 = 'L': apply Q, Q**T, P or P**T from the Left; = 'R': apply Q, Q**T, P or P**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q or P; = 'T': Transpose, apply Q**T or P**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER If VECT = 'Q', the number of columns in the original matrix reduced by DGEBRD. If VECT = 'P', the number of rows in the original matrix reduced by DGEBRD. K >= 0. 
A (input) DOUBLE PRECISION array, dimension (LDA,min(nq,K)) if VECT = 'Q' (LDA,nq) if VECT = 'P' The vectors which define the elementary reflectors H(i) and G(i), whose products determine the matrices Q and P, as returned by DGEBRD. LDA (input) INTEGER The leading dimension of the array A. If VECT = 'Q', LDA >= max(1,nq); if VECT = 'P', LDA >= max(1,min(nq,K)). TAU (input) DOUBLE PRECISION array, dimension (min(nq,K)) TAU(i) must contain the scalar factor of the elementary reflector H(i) or G(i) which determines Q or P, as returned by DGEBRD in the array argument TAUQ or TAUP. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q or P*C or P**T*C or C*P or C*P**T. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; applyq = lsame_(vect, "Q"); left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q or P and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! applyq && ! lsame_(vect, "P")) { *info = -1; } else if (! left && ! lsame_(side, "R")) { *info = -2; } else if (! notran && ! lsame_(trans, "T")) { *info = -3; } else if (*m < 0) { *info = -4; } else if (*n < 0) { *info = -5; } else if (*k < 0) { *info = -6; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = 1, i__2 = min(nq,*k); if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) { *info = -8; } else if (*ldc < max(1,*m)) { *info = -11; } else if (*lwork < max(1,nw) && ! 
lquery) { *info = -13; } } if (*info == 0) { if (applyq) { if (left) { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *m - 1; i__2 = *m - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *n - 1; i__2 = *n - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } } else { if (left) { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *m - 1; i__2 = *m - 1; nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *n - 1; i__2 = *n - 1; nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } } lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMBR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ work[1] = 1.; if (*m == 0 || *n == 0) { return 0; } if (applyq) { /* Apply Q */ if (nq >= *k) { /* Q was determined by a call to DGEBRD with nq >= k */ dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], lwork, &iinfo); } else if (nq > 1) { /* Q was determined by a call to DGEBRD with nq < k */ if (left) { mi = *m - 1; ni = *n; i1 = 2; i2 = 1; } else { mi = *m; ni = *n - 1; i1 = 1; i2 = 2; } i__1 = nq - 1; dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1] , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); } } else { /* Apply P */ if (notran) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } if (nq > *k) { /* P was determined by a call to DGEBRD with nq > k */ dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], lwork, &iinfo); } else if (nq > 1) { /* P was determined by a call to DGEBRD with nq <= k */ if (left) { mi = *m - 1; ni = *n; i1 = 2; i2 = 1; } else { mi = *m; ni = *n - 1; i1 = 1; i2 = 2; } i__1 = nq - 1; dormlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, & iinfo); } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMBR */ } /* dormbr_ */ /* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, ic, jc, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DORML2 overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L', (LDA,N) if SIDE = 'R' The i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,K). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,*k)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORML2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H(i) is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H(i) */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[ ic + jc * c_dim1], ldc, &work[1]); a[i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORML2 */ } /* dorml2_ */ /* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, ic, jc, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork; static char transt[1]; static integer lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMLQ overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L', (LDA,N) if SIDE = 'R' The i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,K). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. 
On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,*k)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMLQ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { work[1] = 1.; return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } if (notran) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__4 = nq - i__ + 1; dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], t, &c__65); if (left) { /* H or H' is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H or H' is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H or H' */ dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], ldc, &work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMLQ */ } /* dormlq_ */ /* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMQL overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQLF in the last k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQLF. 
C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = max(1,*n); } else { nq = *n; nw = max(1,*m); } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info == 0) { if (*m == 0 || *n == 0) { lwkopt = 1; } else { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, (ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = nw * nb; } work[1] = (doublereal) lwkopt; if (*lwork < nw && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DORMQL", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; } else { mi = *m; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i+ib-1) . . . 
H(i+1) H(i) */ i__4 = nq - *k + i__ + ib - 1; dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1] , lda, &tau[i__], t, &c__65); if (left) { /* H or H' is applied to C(1:m-k+i+ib-1,1:n) */ mi = *m - *k + i__ + ib - 1; } else { /* H or H' is applied to C(1:m,1:n-k+i+ib-1) */ ni = *n - *k + i__ + ib - 1; } /* Apply H or H' */ dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, & work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMQL */ } /* dormql_ */ /* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, ic, jc, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMQR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. 
C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMQR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { work[1] = 1.; return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && ! notran || ! left && notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__4 = nq - i__ + 1; dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], t, &c__65) ; if (left) { /* H or H' is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H or H' is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H or H' */ dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], ldc, &work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMQR */ } /* dormqr_ */ /* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m, integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; extern logical lsame_(char *, char *); static integer iinfo, i1; static logical upper; static integer i2, nb, mi, ni, nq, nw; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMTR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix of order nq, with nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of nq-1 elementary reflectors, as returned by DSYTRD: if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. UPLO (input) CHARACTER*1 = 'U': Upper triangle of A contains elementary reflectors from DSYTRD; = 'L': Lower triangle of A contains elementary reflectors from DSYTRD. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L' (LDA,N) if SIDE = 'R' The vectors which define the elementary reflectors, as returned by DSYTRD. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. TAU (input) DOUBLE PRECISION array, dimension (M-1) if SIDE = 'L' (N-1) if SIDE = 'R' TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DSYTRD. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). 
WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); upper = lsame_(uplo, "U"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! upper && ! lsame_(uplo, "L")) { *info = -2; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T")) { *info = -3; } else if (*m < 0) { *info = -4; } else if (*n < 0) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { if (upper) { if (left) { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *m - 1; i__3 = *m - 1; nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *n - 1; i__3 = *n - 1; nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } } else { if (left) { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *m - 1; i__3 = *m - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *n - 1; i__3 = *n - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } } lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__2 = -(*info); xerbla_("DORMTR", &i__2); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || nq == 1) { work[1] = 1.; return 0; } if (left) { mi = *m - 1; ni = *n; } else { mi = *m; ni = *n - 1; } if (upper) { /* Q was determined by a call to DSYTRD with UPLO = 'U' */ i__2 = nq - 1; dormql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, & tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo); } else { /* Q was determined by a call to DSYTRD with UPLO = 'L' */ if (left) { i1 = 2; i2 = 1; } else { i1 = 1; i2 = 2; } i__2 = nq - 1; dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], & c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DORMTR */ } /* dormtr_ */ /* Subroutine */ int 
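/* Editor's note: the two Cholesky routines that follow (DPOTF2, the
   unblocked Level-2 BLAS version; DPOTRF, the blocked Level-3 BLAS
   version) keep the original LAPACK calling convention. A minimal,
   hypothetical call sketch -- names and values are illustrative, not
   taken from this file:

       integer n = 3, lda = 3, info;
       doublereal a[9] = { 4., 2., 2.,
                           2., 5., 3.,
                           2., 3., 6. };   column-major 3x3 SPD matrix
       dpotrf_("L", &n, a, &lda, &info);
       if (info > 0) {
           ... leading minor of order info is not positive definite ...
       }

   On success the lower triangle of a holds L with A = L*L'. */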
dpotf2_(char *uplo, integer *n, doublereal *a, integer * lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static integer j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal ajj; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DPOTF2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U' * U , if UPLO = 'U', or A = L * L', if UPLO = 'L', where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U'*U or A = L*L'. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -k, the k-th argument had an illegal value > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DPOTF2", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (upper) { /* Compute the Cholesky factorization A = U'*U. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Compute U(J,J) and test for non-positive-definiteness. */ i__2 = j - 1; ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1], &c__1); if (ajj <= 0.) { a[j + j * a_dim1] = ajj; goto L30; } ajj = sqrt(ajj); a[j + j * a_dim1] = ajj; /* Compute elements J+1:N of row J. */ if (j < *n) { i__2 = j - 1; i__3 = *n - j; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(j + 1) * a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b15, & a[j + (j + 1) * a_dim1], lda); i__2 = *n - j; d__1 = 1. / ajj; dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda); } /* L10: */ } } else { /* Compute the Cholesky factorization A = L*L'. 
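   Editor's note: mirroring the upper-triangular branch above, step j
   forms ajj = A(j,j) - dot(A(j,1:j-1), A(j,1:j-1)); a non-positive
   value stops the factorization with INFO = j, otherwise L(j,j) =
   sqrt(ajj) and the remainder of column j is updated with DGEMV and
   scaled by 1/ajj via DSCAL.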
*/ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Compute L(J,J) and test for non-positive-definiteness. */ i__2 = j - 1; ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j + a_dim1], lda); if (ajj <= 0.) { a[j + j * a_dim1] = ajj; goto L30; } ajj = sqrt(ajj); a[j + j * a_dim1] = ajj; /* Compute elements J+1:N of column J. */ if (j < *n) { i__2 = *n - j; i__3 = j - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[j + 1 + a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + 1 + j * a_dim1], &c__1); i__2 = *n - j; d__1 = 1. / ajj; dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); } /* L20: */ } } goto L40; L30: *info = j; L40: return 0; /* End of DPOTF2 */ } /* dpotf2_ */ /* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer * lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *), dpotf2_(char *, integer *, doublereal *, integer *, integer *); static integer jb, nb; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DPOTRF computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**T * U, if UPLO = 'U', or A = L * L**T, if UPLO = 'L', where U is an upper triangular matrix and L is lower triangular. This is the block version of the algorithm, calling Level 3 BLAS. Arguments ========= UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**T*U or A = L*L**T. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, the leading minor of order i is not positive definite, and the factorization could not be completed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! 
lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DPOTRF", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Determine the block size for this environment. */ nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); if (nb <= 1 || nb >= *n) { /* Use unblocked code. */ dpotf2_(uplo, n, &a[a_offset], lda, info); } else { /* Use blocked code. */ if (upper) { /* Compute the Cholesky factorization A = U'*U. */ i__1 = *n; i__2 = nb; for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* Update and factorize the current diagonal block and test for non-positive-definiteness. Computing MIN */ i__3 = nb, i__4 = *n - j + 1; jb = min(i__3,i__4); i__3 = j - 1; dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b151, &a[j * a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda); dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info); if (*info != 0) { goto L30; } if (j + jb <= *n) { /* Compute the current block row. */ i__3 = *n - j - jb + 1; i__4 = j - 1; dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, & c_b151, &a[j * a_dim1 + 1], lda, &a[(j + jb) * a_dim1 + 1], lda, &c_b15, &a[j + (j + jb) * a_dim1], lda); i__3 = *n - j - jb + 1; dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, & i__3, &c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda); } /* L10: */ } } else { /* Compute the Cholesky factorization A = L*L'. */ i__2 = *n; i__1 = nb; for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { /* Update and factorize the current diagonal block and test for non-positive-definiteness. Computing MIN */ i__3 = nb, i__4 = *n - j + 1; jb = min(i__3,i__4); i__3 = j - 1; dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b151, &a[j + a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda); dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info); if (*info != 0) { goto L30; } if (j + jb <= *n) { /* Compute the current block column. 
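   Editor's note: the trailing block column is first updated with DGEMM
   against the columns already factored, then a triangular solve (DTRSM
   with the transpose of the freshly factored diagonal block) completes
   the panel.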
*/ i__3 = *n - j - jb + 1; i__4 = j - 1; dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, & c_b151, &a[j + jb + a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + jb + j * a_dim1], lda); i__3 = *n - j - jb + 1; dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, & jb, &c_b15, &a[j + j * a_dim1], lda, &a[j + jb + j * a_dim1], lda); } /* L20: */ } } } goto L40; L30: *info = *info + j - 1; L40: return 0; /* End of DPOTRF */ } /* dpotrf_ */ /* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__, doublereal *e, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *iwork, integer *liwork, integer *info) { /* System generated locals */ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double log(doublereal); integer pow_ii(integer *, integer *); double sqrt(doublereal); /* Local variables */ static doublereal tiny; static integer i__, j, k, m; static doublereal p; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer lwmin; extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer start, ii; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static integer finish; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, integer *), dlasrt_(char *, integer *, doublereal *, integer *); static integer liwmin, icompz; extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static doublereal orgnrm; static logical lquery; static integer smlsiz, storez, strtrw, lgn; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTEDC computes all eigenvalues and, optionally, eigenvectors of a symmetric tridiagonal matrix using the divide and conquer method. The eigenvectors of a full or band real symmetric matrix can also be found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to tridiagonal form. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. See DLAED3 for details. Arguments ========= COMPZ (input) CHARACTER*1 = 'N': Compute eigenvalues only. = 'I': Compute eigenvectors of tridiagonal matrix also. = 'V': Compute eigenvectors of original dense symmetric matrix also. 
On entry, Z contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) On entry, if COMPZ = 'V', then Z contains the orthogonal matrix used in the reduction to tridiagonal form. On exit, if INFO = 0, then if COMPZ = 'V', Z contains the orthonormal eigenvectors of the original symmetric matrix, and if COMPZ = 'I', Z contains the orthonormal eigenvectors of the symmetric tridiagonal matrix. If COMPZ = 'N', then Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1. If eigenvectors are desired, then LDZ >= max(1,N). WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If COMPZ = 'N' or N <= 1 then LWORK must be at least 1. If COMPZ = 'V' and N > 1 then LWORK must be at least ( 1 + 3*N + 2*N*lg N + 3*N**2 ), where lg( N ) = smallest integer k such that 2**k >= N. If COMPZ = 'I' and N > 1 then LWORK must be at least ( 1 + 4*N + N**2 ). Note that for COMPZ = 'I' or 'V', then if N is less than or equal to the minimum divide size, usually 25, then LWORK need only be max(1,2*(N-1)). If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. LIWORK (input) INTEGER The dimension of the array IWORK. If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1. If COMPZ = 'V' and N > 1 then LIWORK must be at least ( 6 + 6*N + 5*N*lg N ). If COMPZ = 'I' and N > 1 then LIWORK must be at least ( 3 + 5*N ). Note that for COMPZ = 'I' or 'V', then if N is less than or equal to the minimum divide size, usually 25, then LIWORK need only be 1. If LIWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the IWORK array, returns this value as the first entry of the IWORK array, and no error message related to LIWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. 
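   Editor's note (not part of the original LAPACK documentation): a
   minimal, hypothetical call sketch using the workspace query described
   above; all names and sizes are illustrative:

       integer n = 4, ldz = 4, lwork = -1, liwork = -1, info;
       doublereal d[4], e[3], z[16], wkopt;
       integer iwkopt;
       ... fill d with the diagonal, e with the subdiagonal ...
       dstedc_("I", &n, d, e, z, &ldz, &wkopt, &lwork, &iwkopt,
               &liwork, &info);                      workspace query
       lwork = (integer) wkopt; liwork = iwkopt;
       ... allocate work[lwork] and iwork[liwork], then repeat the call;
           eigenvalues arrive in ascending order in d, eigenvectors
           in z ...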
*/ /* Parameter adjustments */ --d__; --e; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; --iwork; /* Function Body */ *info = 0; lquery = *lwork == -1 || *liwork == -1; if (lsame_(compz, "N")) { icompz = 0; } else if (lsame_(compz, "V")) { icompz = 1; } else if (lsame_(compz, "I")) { icompz = 2; } else { icompz = -1; } if (icompz < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) { *info = -6; } if (*info == 0) { /* Compute the workspace requirements */ smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); if (*n <= 1 || icompz == 0) { liwmin = 1; lwmin = 1; } else if (*n <= smlsiz) { liwmin = 1; lwmin = *n - 1 << 1; } else { lgn = (integer) (log((doublereal) (*n)) / log(2.)); if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (icompz == 1) { /* Computing 2nd power */ i__1 = *n; lwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3; liwmin = *n * 6 + 6 + *n * 5 * lgn; } else if (icompz == 2) { /* Computing 2nd power */ i__1 = *n; lwmin = (*n << 2) + 1 + i__1 * i__1; liwmin = *n * 5 + 3; } } work[1] = (doublereal) lwmin; iwork[1] = liwmin; if (*lwork < lwmin && ! lquery) { *info = -8; } else if (*liwork < liwmin && ! lquery) { *info = -10; } } if (*info != 0) { i__1 = -(*info); xerbla_("DSTEDC", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { if (icompz != 0) { z__[z_dim1 + 1] = 1.; } return 0; } /* If the following conditional clause is removed, then the routine will use the Divide and Conquer routine to compute only the eigenvalues, which requires (3N + 3N**2) real workspace and (2 + 5N + 2N lg(N)) integer workspace. Since on many architectures DSTERF is much faster than any other algorithm for finding eigenvalues only, it is used here as the default. If the conditional clause is removed, then information on the size of workspace needs to be changed. If COMPZ = 'N', use DSTERF to compute the eigenvalues. */ if (icompz == 0) { dsterf_(n, &d__[1], &e[1], info); goto L50; } /* If N is smaller than the minimum divide size (SMLSIZ+1), then solve the problem with another solver. */ if (*n <= smlsiz) { dsteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info); } else { /* If COMPZ = 'V', the Z matrix must be stored elsewhere for later use. */ if (icompz == 1) { storez = *n * *n + 1; } else { storez = 1; } if (icompz == 2) { dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); } /* Scale. */ orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { goto L50; } eps = EPSILON; start = 1; /* while ( START <= N ) */ L10: if (start <= *n) { /* Let FINISH be the position of the next subdiagonal entry such that E( FINISH ) <= TINY or FINISH = N if no such subdiagonal exists. The matrix identified by the elements between START and FINISH constitutes an independent sub-problem. */ finish = start; L20: if (finish < *n) { tiny = eps * sqrt((d__1 = d__[finish], abs(d__1))) * sqrt(( d__2 = d__[finish + 1], abs(d__2))); if ((d__1 = e[finish], abs(d__1)) > tiny) { ++finish; goto L20; } } /* (Sub) Problem determined. Compute its size and solve it. */ m = finish - start + 1; if (m == 1) { start = finish + 1; goto L10; } if (m > smlsiz) { /* Scale. 
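   Editor's note: the sub-problem is normalized (DLANST with NORM = 'M'
   gives ORGNRM, DLASCL scales D and E to unit max-norm) before DLAED0
   runs, and the eigenvalues are scaled back by ORGNRM afterwards.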
*/ orgnrm = dlanst_("M", &m, &d__[start], &e[start]); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &m, &c__1, &d__[ start], &m, info); i__1 = m - 1; i__2 = m - 1; dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &i__1, &c__1, &e[ start], &i__2, info); if (icompz == 1) { strtrw = 1; } else { strtrw = start; } dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[strtrw + start * z_dim1], ldz, &work[1], n, &work[storez], & iwork[1], info); if (*info != 0) { *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m + 1) + start - 1; goto L50; } /* Scale back. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &m, &c__1, &d__[ start], &m, info); } else { if (icompz == 1) { /* Since QR won't update a Z matrix which is larger than the length of D, we must solve the sub-problem in a workspace and then multiply back into Z. */ dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, & work[m * m + 1], info); dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[ storez], n); dgemm_("N", "N", n, &m, &m, &c_b15, &work[storez], n, & work[1], &m, &c_b29, &z__[start * z_dim1 + 1], ldz); } else if (icompz == 2) { dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start * z_dim1], ldz, &work[1], info); } else { dsterf_(&m, &d__[start], &e[start], info); } if (*info != 0) { *info = start * (*n + 1) + finish; goto L50; } } start = finish + 1; goto L10; } /* endwhile If the problem split any number of times, then the eigenvalues will not be properly ordered. Here we permute the eigenvalues (and the associated eigenvectors) into ascending order. */ if (m != *n) { if (icompz == 0) { /* Use Quick Sort */ dlasrt_("I", n, &d__[1], info); } else { /* Use Selection Sort to minimize swaps of eigenvectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; k = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] < p) { k = j; p = d__[j]; } /* L30: */ } if (k != i__) { d__[k] = d__[i__]; d__[i__] = p; dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], &c__1); } /* L40: */ } } } } L50: work[1] = (doublereal) lwmin; iwork[1] = liwmin; return 0; /* End of DSTEDC */ } /* dstedc_ */ /* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__, doublereal *e, doublereal *z__, integer *ldz, doublereal *work, integer *info) { /* System generated locals */ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer lend, jtot; extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal b, c__, f, g; static integer i__, j, k, l, m; static doublereal p, r__, s; extern logical lsame_(char *, char *); extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal anorm; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer l1; extern /* Subroutine */ int dlaev2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static integer lendm1, lendp1; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ii; static integer mm, iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal safmin; 
extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal safmax; extern /* Subroutine */ int xerbla_(char *, integer *); extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static integer lendsv; static doublereal ssfmin; static integer nmaxit, icompz; static doublereal ssfmax; static integer lm1, mm1, nm1; static doublereal rt1, rt2, eps; static integer lsv; static doublereal tst, eps2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTEQR computes all eigenvalues and, optionally, eigenvectors of a symmetric tridiagonal matrix using the implicit QL or QR method. The eigenvectors of a full or band symmetric matrix can also be found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to tridiagonal form. Arguments ========= COMPZ (input) CHARACTER*1 = 'N': Compute eigenvalues only. = 'V': Compute eigenvalues and eigenvectors of the original symmetric matrix. On entry, Z must contain the orthogonal matrix used to reduce the original matrix to tridiagonal form. = 'I': Compute eigenvalues and eigenvectors of the tridiagonal matrix. Z is initialized to the identity matrix. N (input) INTEGER The order of the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the (n-1) subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) On entry, if COMPZ = 'V', then Z contains the orthogonal matrix used in the reduction to tridiagonal form. On exit, if INFO = 0, then if COMPZ = 'V', Z contains the orthonormal eigenvectors of the original symmetric matrix, and if COMPZ = 'I', Z contains the orthonormal eigenvectors of the symmetric tridiagonal matrix. If COMPZ = 'N', then Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1, and if eigenvectors are desired, then LDZ >= max(1,N). WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) If COMPZ = 'N', then WORK is not referenced. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm has failed to find all the eigenvalues in a total of 30*N iterations; if INFO = i, then i elements of E have not converged to zero; on exit, D and E contain the elements of a symmetric tridiagonal matrix which is orthogonally similar to the original matrix. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; if (lsame_(compz, "N")) { icompz = 0; } else if (lsame_(compz, "V")) { icompz = 1; } else if (lsame_(compz, "I")) { icompz = 2; } else { icompz = -1; } if (icompz < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("DSTEQR", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { if (icompz == 2) { z__[z_dim1 + 1] = 1.; } return 0; } /* Determine the unit roundoff and over/underflow thresholds. 
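   Editor's note: SSFMAX and SSFMIN computed below bound the norm to
   which each split-off block is rescaled by DLASCL, so that squaring
   and summing its entries can neither overflow nor underflow harmfully.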
*/ eps = EPSILON; /* Computing 2nd power */ d__1 = eps; eps2 = d__1 * d__1; safmin = SAFEMINIMUM; safmax = 1. / safmin; ssfmax = sqrt(safmax) / 3.; ssfmin = sqrt(safmin) / eps2; /* Compute the eigenvalues and eigenvectors of the tridiagonal matrix. */ if (icompz == 2) { dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); } nmaxit = *n * 30; jtot = 0; /* Determine where the matrix splits and choose QL or QR iteration for each block, according to whether top or bottom diagonal element is smaller. */ l1 = 1; nm1 = *n - 1; L10: if (l1 > *n) { goto L160; } if (l1 > 1) { e[l1 - 1] = 0.; } if (l1 <= nm1) { i__1 = nm1; for (m = l1; m <= i__1; ++m) { tst = (d__1 = e[m], abs(d__1)); if (tst == 0.) { goto L30; } if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) { e[m] = 0.; goto L30; } /* L20: */ } } m = *n; L30: l = l1; lsv = l; lend = m; lendsv = lend; l1 = m + 1; if (lend == l) { goto L10; } /* Scale submatrix in rows and columns L to LEND */ i__1 = lend - l + 1; anorm = dlanst_("I", &i__1, &d__[l], &e[l]); iscale = 0; if (anorm == 0.) { goto L10; } if (anorm > ssfmax) { iscale = 1; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info); } else if (anorm < ssfmin) { iscale = 2; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info); } /* Choose between QL and QR iteration */ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { lend = lsv; l = lendsv; } if (lend > l) { /* QL Iteration Look for small subdiagonal element. */ L40: if (l != lend) { lendm1 = lend - 1; i__1 = lendm1; for (m = l; m <= i__1; ++m) { /* Computing 2nd power */ d__2 = (d__1 = e[m], abs(d__1)); tst = d__2 * d__2; if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m + 1], abs(d__2)) + safmin) { goto L60; } /* L50: */ } } m = lend; L60: if (m < lend) { e[m] = 0.; } p = d__[l]; if (m == l) { goto L80; } /* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 to compute its eigensystem. */ if (m == l + 1) { if (icompz > 0) { dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s); work[l] = c__; work[*n - 1 + l] = s; dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], & z__[l * z_dim1 + 1], ldz); } else { dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2); } d__[l] = rt1; d__[l + 1] = rt2; e[l] = 0.; l += 2; if (l <= lend) { goto L40; } goto L140; } if (jtot == nmaxit) { goto L140; } ++jtot; /* Form shift. */ g = (d__[l + 1] - p) / (e[l] * 2.); r__ = dlapy2_(&g, &c_b15); g = d__[m] - p + e[l] / (g + d_sign(&r__, &g)); s = 1.; c__ = 1.; p = 0.; /* Inner loop */ mm1 = m - 1; i__1 = l; for (i__ = mm1; i__ >= i__1; --i__) { f = s * e[i__]; b = c__ * e[i__]; dlartg_(&g, &f, &c__, &s, &r__); if (i__ != m - 1) { e[i__ + 1] = r__; } g = d__[i__ + 1] - p; r__ = (d__[i__] - g) * s + c__ * 2. * b; p = s * r__; d__[i__ + 1] = g + p; g = c__ * r__ - b; /* If eigenvectors are desired, then save rotations. */ if (icompz > 0) { work[i__] = c__; work[*n - 1 + i__] = -s; } /* L70: */ } /* If eigenvectors are desired, then apply saved rotations. */ if (icompz > 0) { mm = m - l + 1; dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l * z_dim1 + 1], ldz); } d__[l] -= p; e[l] = g; goto L40; /* Eigenvalue found. 
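   Editor's note: at label L80 the converged eigenvalue P is stored in
   D(L) and the QL sweep resumes on the deflated block L+1..LEND.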
*/ L80: d__[l] = p; ++l; if (l <= lend) { goto L40; } goto L140; } else { /* QR Iteration Look for small superdiagonal element. */ L90: if (l != lend) { lendp1 = lend + 1; i__1 = lendp1; for (m = l; m >= i__1; --m) { /* Computing 2nd power */ d__2 = (d__1 = e[m - 1], abs(d__1)); tst = d__2 * d__2; if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - 1], abs(d__2)) + safmin) { goto L110; } /* L100: */ } } m = lend; L110: if (m > lend) { e[m - 1] = 0.; } p = d__[l]; if (m == l) { goto L130; } /* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 to compute its eigensystem. */ if (m == l - 1) { if (icompz > 0) { dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s) ; work[m] = c__; work[*n - 1 + m] = s; dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], & z__[(l - 1) * z_dim1 + 1], ldz); } else { dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2); } d__[l - 1] = rt1; d__[l] = rt2; e[l - 1] = 0.; l += -2; if (l >= lend) { goto L90; } goto L140; } if (jtot == nmaxit) { goto L140; } ++jtot; /* Form shift. */ g = (d__[l - 1] - p) / (e[l - 1] * 2.); r__ = dlapy2_(&g, &c_b15); g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g)); s = 1.; c__ = 1.; p = 0.; /* Inner loop */ lm1 = l - 1; i__1 = lm1; for (i__ = m; i__ <= i__1; ++i__) { f = s * e[i__]; b = c__ * e[i__]; dlartg_(&g, &f, &c__, &s, &r__); if (i__ != m) { e[i__ - 1] = r__; } g = d__[i__] - p; r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b; p = s * r__; d__[i__] = g + p; g = c__ * r__ - b; /* If eigenvectors are desired, then save rotations. */ if (icompz > 0) { work[i__] = c__; work[*n - 1 + i__] = s; } /* L120: */ } /* If eigenvectors are desired, then apply saved rotations. */ if (icompz > 0) { mm = l - m + 1; dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m * z_dim1 + 1], ldz); } d__[l] -= p; e[lm1] = g; goto L90; /* Eigenvalue found. */ L130: d__[l] = p; --l; if (l >= lend) { goto L90; } goto L140; } /* Undo scaling if necessary */ L140: if (iscale == 1) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info); i__1 = lendsv - lsv; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, info); } else if (iscale == 2) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info); i__1 = lendsv - lsv; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, info); } /* Check for no convergence to an eigenvalue after a total of N*MAXIT iterations. */ if (jtot < nmaxit) { goto L10; } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L150: */ } goto L190; /* Order eigenvalues and eigenvectors. 
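   Editor's note: DLASRT (quick sort) is used when only eigenvalues were
   requested; when eigenvectors are present a selection sort is used
   instead, so each eigenvector column is moved at most once via DSWAP.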
*/ L160: if (icompz == 0) { /* Use Quick Sort */ dlasrt_("I", n, &d__[1], info); } else { /* Use Selection Sort to minimize swaps of eigenvectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; k = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] < p) { k = j; p = d__[j]; } /* L170: */ } if (k != i__) { d__[k] = d__[i__]; d__[i__] = p; dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], &c__1); } /* L180: */ } } L190: return 0; /* End of DSTEQR */ } /* dsteqr_ */ /* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e, integer *info) { /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal oldc; static integer lend, jtot; extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal c__; static integer i__, l, m; static doublereal p, gamma, r__, s, alpha, sigma, anorm; static integer l1; extern doublereal dlapy2_(doublereal *, doublereal *); static doublereal bb; static integer iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static doublereal oldgam, safmin; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal safmax; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static integer lendsv; static doublereal ssfmin; static integer nmaxit; static doublereal ssfmax, rt1, rt2, eps, rte; static integer lsv; static doublereal eps2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTERF computes all eigenvalues of a symmetric tridiagonal matrix using the Pal-Walker-Kahan variant of the QL or QR algorithm. Arguments ========= N (input) INTEGER The order of the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the (n-1) subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm failed to find all of the eigenvalues in a total of 30*N iterations; if INFO = i, then i elements of E have not converged to zero. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --e; --d__; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n < 0) { *info = -1; i__1 = -(*info); xerbla_("DSTERF", &i__1); return 0; } if (*n <= 1) { return 0; } /* Determine the unit roundoff for this environment. */ eps = EPSILON; /* Computing 2nd power */ d__1 = eps; eps2 = d__1 * d__1; safmin = SAFEMINIMUM; safmax = 1. / safmin; ssfmax = sqrt(safmax) / 3.; ssfmin = sqrt(safmin) / eps2; /* Compute the eigenvalues of the tridiagonal matrix. */ nmaxit = *n * 30; sigma = 0.; jtot = 0; /* Determine where the matrix splits and choose QL or QR iteration for each block, according to whether top or bottom diagonal element is smaller. 
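   Editor's note: a block boundary is declared wherever a subdiagonal
   entry is negligible relative to the geometric mean of its two
   diagonal neighbours, and iteration proceeds from the end with the
   smaller diagonal entry (QL vs. QR), which typically converges faster.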
*/ l1 = 1; L10: if (l1 > *n) { goto L170; } if (l1 > 1) { e[l1 - 1] = 0.; } i__1 = *n - 1; for (m = l1; m <= i__1; ++m) { if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) { e[m] = 0.; goto L30; } /* L20: */ } m = *n; L30: l = l1; lsv = l; lend = m; lendsv = lend; l1 = m + 1; if (lend == l) { goto L10; } /* Scale submatrix in rows and columns L to LEND */ i__1 = lend - l + 1; anorm = dlanst_("I", &i__1, &d__[l], &e[l]); iscale = 0; if (anorm > ssfmax) { iscale = 1; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info); } else if (anorm < ssfmin) { iscale = 2; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info); } i__1 = lend - 1; for (i__ = l; i__ <= i__1; ++i__) { /* Computing 2nd power */ d__1 = e[i__]; e[i__] = d__1 * d__1; /* L40: */ } /* Choose between QL and QR iteration */ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { lend = lsv; l = lendsv; } if (lend >= l) { /* QL Iteration Look for small subdiagonal element. */ L50: if (l != lend) { i__1 = lend - 1; for (m = l; m <= i__1; ++m) { if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m + 1], abs(d__1))) { goto L70; } /* L60: */ } } m = lend; L70: if (m < lend) { e[m] = 0.; } p = d__[l]; if (m == l) { goto L90; } /* If remaining matrix is 2 by 2, use DLAE2 to compute its eigenvalues. */ if (m == l + 1) { rte = sqrt(e[l]); dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2); d__[l] = rt1; d__[l + 1] = rt2; e[l] = 0.; l += 2; if (l <= lend) { goto L50; } goto L150; } if (jtot == nmaxit) { goto L150; } ++jtot; /* Form shift. */ rte = sqrt(e[l]); sigma = (d__[l + 1] - p) / (rte * 2.); r__ = dlapy2_(&sigma, &c_b15); sigma = p - rte / (sigma + d_sign(&r__, &sigma)); c__ = 1.; s = 0.; gamma = d__[m] - sigma; p = gamma * gamma; /* Inner loop */ i__1 = l; for (i__ = m - 1; i__ >= i__1; --i__) { bb = e[i__]; r__ = p + bb; if (i__ != m - 1) { e[i__ + 1] = s * r__; } oldc = c__; c__ = p / r__; s = bb / r__; oldgam = gamma; alpha = d__[i__]; gamma = c__ * (alpha - sigma) - s * oldgam; d__[i__ + 1] = oldgam + (alpha - gamma); if (c__ != 0.) { p = gamma * gamma / c__; } else { p = oldc * bb; } /* L80: */ } e[l] = s * p; d__[l] = sigma + gamma; goto L50; /* Eigenvalue found. */ L90: d__[l] = p; ++l; if (l <= lend) { goto L50; } goto L150; } else { /* QR Iteration Look for small superdiagonal element. */ L100: i__1 = lend + 1; for (m = l; m >= i__1; --m) { if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m - 1], abs(d__1))) { goto L120; } /* L110: */ } m = lend; L120: if (m > lend) { e[m - 1] = 0.; } p = d__[l]; if (m == l) { goto L140; } /* If remaining matrix is 2 by 2, use DLAE2 to compute its eigenvalues. */ if (m == l - 1) { rte = sqrt(e[l - 1]); dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2); d__[l] = rt1; d__[l - 1] = rt2; e[l - 1] = 0.; l += -2; if (l >= lend) { goto L100; } goto L150; } if (jtot == nmaxit) { goto L150; } ++jtot; /* Form shift. 
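   Editor's note: E holds squared subdiagonals at this point, so RTE
   recovers the subdiagonal itself; SIGMA is the eigenvalue of the
   2-by-2 formed by D(L-1), D(L) and that subdiagonal which is closer
   to D(L) (a Wilkinson-type shift), assembled stably with DLAPY2.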
*/ rte = sqrt(e[l - 1]); sigma = (d__[l - 1] - p) / (rte * 2.); r__ = dlapy2_(&sigma, &c_b15); sigma = p - rte / (sigma + d_sign(&r__, &sigma)); c__ = 1.; s = 0.; gamma = d__[m] - sigma; p = gamma * gamma; /* Inner loop */ i__1 = l - 1; for (i__ = m; i__ <= i__1; ++i__) { bb = e[i__]; r__ = p + bb; if (i__ != m) { e[i__ - 1] = s * r__; } oldc = c__; c__ = p / r__; s = bb / r__; oldgam = gamma; alpha = d__[i__ + 1]; gamma = c__ * (alpha - sigma) - s * oldgam; d__[i__] = oldgam + (alpha - gamma); if (c__ != 0.) { p = gamma * gamma / c__; } else { p = oldc * bb; } /* L130: */ } e[l - 1] = s * p; d__[l] = sigma + gamma; goto L100; /* Eigenvalue found. */ L140: d__[l] = p; --l; if (l >= lend) { goto L100; } goto L150; } /* Undo scaling if necessary */ L150: if (iscale == 1) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info); } if (iscale == 2) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info); } /* Check for no convergence to an eigenvalue after a total of N*MAXIT iterations. */ if (jtot < nmaxit) { goto L10; } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L160: */ } goto L180; /* Sort eigenvalues in increasing order. */ L170: dlasrt_("I", n, &d__[1], info); L180: return 0; /* End of DSTERF */ } /* dsterf_ */ /* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal * a, integer *lda, doublereal *w, doublereal *work, integer *lwork, integer *iwork, integer *liwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer inde; static doublereal anrm, rmin, rmax; static integer lopt; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal sigma; extern logical lsame_(char *, char *); static integer iinfo, lwmin, liopt; static logical lower, wantz; static integer indwk2, llwrk2; static integer iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dstedc_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *), dlacpy_( char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum; static integer indtau; extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, integer *); extern doublereal dlansy_(char *, char *, integer *, doublereal *, integer *, doublereal *); static integer indwrk, liwmin; extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer llwork; static doublereal smlnum; static logical lquery; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYEVD computes all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A. 
If eigenvectors are desired, it uses a divide and conquer algorithm. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Because of large use of BLAS of level 3, DSYEVD needs N**2 more workspace than DSYEVX. Arguments ========= JOBZ (input) CHARACTER*1 = 'N': Compute eigenvalues only; = 'V': Compute eigenvalues and eigenvectors. UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA, N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A. On exit, if JOBZ = 'V', then if INFO = 0, A contains the orthonormal eigenvectors of the matrix A. If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') or the upper triangle (if UPLO='U') of A, including the diagonal, is destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). W (output) DOUBLE PRECISION array, dimension (N) If INFO = 0, the eigenvalues in ascending order. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If N <= 1, LWORK must be at least 1. If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1. If JOBZ = 'V' and N > 1, LWORK must be at least 1 + 6*N + 2*N**2. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal sizes of the WORK and IWORK arrays, returns these values as the first entries of the WORK and IWORK arrays, and no error message related to LWORK or LIWORK is issued by XERBLA. IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. LIWORK (input) INTEGER The dimension of the array IWORK. If N <= 1, LIWORK must be at least 1. If JOBZ = 'N' and N > 1, LIWORK must be at least 1. If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. If LIWORK = -1, then a workspace query is assumed; the routine only calculates the optimal sizes of the WORK and IWORK arrays, returns these values as the first entries of the WORK and IWORK arrays, and no error message related to LWORK or LIWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i and JOBZ = 'N', then the algorithm failed to converge; i off-diagonal elements of an intermediate tridiagonal form did not converge to zero; if INFO = i and JOBZ = 'V', then the algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. Modified description of INFO. Sven, 16 Feb 05. ===================================================================== Test the input parameters. 
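   Editor's note (not part of the original LAPACK documentation): a
   minimal, hypothetical call sketch with a workspace query; names and
   sizes are illustrative:

       integer n = 3, lda = 3, lwork = -1, liwork = -1, info;
       doublereal a[9], w[3], wkopt;
       integer iwkopt;
       ... fill (at least) the lower triangle of the column-major a ...
       dsyevd_("V", "L", &n, a, &lda, w, &wkopt, &lwork, &iwkopt,
               &liwork, &info);                      workspace query
       lwork = (integer) wkopt; liwork = iwkopt;
       ... allocate work[lwork] and iwork[liwork], then repeat the call;
           eigenvalues arrive in ascending order in w and the
           eigenvectors overwrite a ...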
*/ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --w; --work; --iwork; /* Function Body */ wantz = lsame_(jobz, "V"); lower = lsame_(uplo, "L"); lquery = *lwork == -1 || *liwork == -1; *info = 0; if (! (wantz || lsame_(jobz, "N"))) { *info = -1; } else if (! (lower || lsame_(uplo, "U"))) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } if (*info == 0) { if (*n <= 1) { liwmin = 1; lwmin = 1; lopt = lwmin; liopt = liwmin; } else { if (wantz) { liwmin = *n * 5 + 3; /* Computing 2nd power */ i__1 = *n; lwmin = *n * 6 + 1 + (i__1 * i__1 << 1); } else { liwmin = 1; lwmin = (*n << 1) + 1; } /* Computing MAX */ i__1 = lwmin, i__2 = (*n << 1) + ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); lopt = max(i__1,i__2); liopt = liwmin; } work[1] = (doublereal) lopt; iwork[1] = liopt; if (*lwork < lwmin && ! lquery) { *info = -8; } else if (*liwork < liwmin && ! lquery) { *info = -10; } } if (*info != 0) { i__1 = -(*info); xerbla_("DSYEVD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { w[1] = a[a_dim1 + 1]; if (wantz) { a[a_dim1 + 1] = 1.; } return 0; } /* Get machine constants. */ safmin = SAFEMINIMUM; eps = PRECISION; smlnum = safmin / eps; bignum = 1. / smlnum; rmin = sqrt(smlnum); rmax = sqrt(bignum); /* Scale matrix to allowable range, if necessary. */ anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]); iscale = 0; if (anrm > 0. && anrm < rmin) { iscale = 1; sigma = rmin / anrm; } else if (anrm > rmax) { iscale = 1; sigma = rmax / anrm; } if (iscale == 1) { dlascl_(uplo, &c__0, &c__0, &c_b15, &sigma, n, n, &a[a_offset], lda, info); } /* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */ inde = 1; indtau = inde + *n; indwrk = indtau + *n; llwork = *lwork - indwrk + 1; indwk2 = indwrk + *n * *n; llwrk2 = *lwork - indwk2 + 1; dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], & work[indwrk], &llwork, &iinfo); lopt = (integer) ((*n << 1) + work[indwrk]); /* For eigenvalues only, call DSTERF. For eigenvectors, first call DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the tridiagonal matrix, then call DORMTR to multiply it by the Householder transformations stored in A. */ if (! wantz) { dsterf_(n, &w[1], &work[inde], info); } else { dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], & llwrk2, &iwork[1], liwork, info); dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[ indwrk], n, &work[indwk2], &llwrk2, &iinfo); dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda); /* Computing MAX Computing 2nd power */ i__3 = *n; i__1 = lopt, i__2 = *n * 6 + 1 + (i__3 * i__3 << 1); lopt = max(i__1,i__2); } /* If matrix was scaled, then rescale eigenvalues appropriately. */ if (iscale == 1) { d__1 = 1. 
/ sigma; dscal_(n, &d__1, &w[1], &c__1); } work[1] = (doublereal) lopt; iwork[1] = liopt; return 0; /* End of DSYEVD */ } /* dsyevd_ */ /* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal taui; extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer i__; static doublereal alpha; extern logical lsame_(char *, char *); extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer * ); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal form T by an orthogonal similarity transformation: Q' * A * Q = T. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored: = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if UPLO = 'U', the diagonal and first superdiagonal of A are overwritten by the corresponding elements of the tridiagonal matrix T, and the elements above the first superdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the diagonal and first subdiagonal of A are over- written by the corresponding elements of the tridiagonal matrix T, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). D (output) DOUBLE PRECISION array, dimension (N) The diagonal elements of the tridiagonal matrix T: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix T: E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n-1) . . . H(2) H(1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in A(1:i-1,i+1), and tau in TAU(i). 
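(A concrete illustration with n = 3 and UPLO = 'U': Q = H(2)*H(1); for H(2), v = (v1, 1, 0)' with v1 stored in A(1,3) and its scalar factor in TAU(2), while for H(1), v = e1, so that reflector modifies only the first coordinate.)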
If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(n-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), and tau in TAU(i). The contents of A on exit are illustrated by the following examples with n = 5: if UPLO = 'U': if UPLO = 'L': ( d e v2 v3 v4 ) ( d ) ( d e v3 v4 ) ( e d ) ( d e v4 ) ( v1 e d ) ( d e ) ( v1 v2 e d ) ( d ) ( v1 v2 v3 e d ) where d and e denote diagonal and off-diagonal elements of T, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tau; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DSYTD2", &i__1); return 0; } /* Quick return if possible */ if (*n <= 0) { return 0; } if (upper) { /* Reduce the upper triangle of A */ for (i__ = *n - 1; i__ >= 1; --i__) { /* Generate elementary reflector H(i) = I - tau * v * v' to annihilate A(1:i-1,i+1) */ dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui); e[i__] = a[i__ + (i__ + 1) * a_dim1]; if (taui != 0.) { /* Apply H(i) from both sides to A(1:i,1:i) */ a[i__ + (i__ + 1) * a_dim1] = 1.; /* Compute x := tau * A * v storing x in TAU(1:i) */ dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * a_dim1 + 1], &c__1, &c_b29, &tau[1], &c__1) ; /* Compute w := x - 1/2 * tau * (x'*v) * v */ alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1], &c__1); daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ 1], &c__1); /* Apply the transformation as a rank-2 update: A := A - v * w' - w * v' */ dsyr2_(uplo, &i__, &c_b151, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[1], &c__1, &a[a_offset], lda); a[i__ + (i__ + 1) * a_dim1] = e[i__]; } d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1]; tau[i__] = taui; /* L10: */ } d__[1] = a[a_dim1 + 1]; } else { /* Reduce the lower triangle of A */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) = I - tau * v * v' to annihilate A(i+2:n,i) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &taui); e[i__] = a[i__ + 1 + i__ * a_dim1]; if (taui != 0.) 
{ /* Apply H(i) from both sides to A(i+1:n,i+1:n) */ a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute x := tau * A * v storing y in TAU(i:n-1) */ i__2 = *n - i__; dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &tau[ i__], &c__1); /* Compute w := x - 1/2 * tau * (x'*v) * v */ i__2 = *n - i__; alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *n - i__; daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &c__1); /* Apply the transformation as a rank-2 update: A := A - v * w' - w * v' */ i__2 = *n - i__; dsyr2_(uplo, &i__2, &c_b151, &a[i__ + 1 + i__ * a_dim1], & c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda); a[i__ + 1 + i__ * a_dim1] = e[i__]; } d__[i__] = a[i__ + i__ * a_dim1]; tau[i__] = taui; /* L20: */ } d__[*n] = a[*n + *n * a_dim1]; } return 0; /* End of DSYTD2 */ } /* dsytd2_ */ /* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal * work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); static integer nbmin, iinfo; static logical upper; extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nb, kk, nx; extern /* Subroutine */ int dlatrd_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYTRD reduces a real symmetric matrix A to real symmetric tridiagonal form T by an orthogonal similarity transformation: Q**T * A * Q = T. Arguments ========= UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if UPLO = 'U', the diagonal and first superdiagonal of A are overwritten by the corresponding elements of the tridiagonal matrix T, and the elements above the first superdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the diagonal and first subdiagonal of A are over- written by the corresponding elements of the tridiagonal matrix T, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). 
D (output) DOUBLE PRECISION array, dimension (N) The diagonal elements of the tridiagonal matrix T: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix T: E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= 1. For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n-1) . . . H(2) H(1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in A(1:i-1,i+1), and tau in TAU(i). If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(n-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), and tau in TAU(i). The contents of A on exit are illustrated by the following examples with n = 5: if UPLO = 'U': if UPLO = 'L': ( d e v2 v3 v4 ) ( d ) ( d e v3 v4 ) ( e d ) ( d e v4 ) ( v1 e d ) ( d e ) ( v1 v2 e d ) ( d ) ( v1 v2 v3 e d ) where d and e denote diagonal and off-diagonal elements of T, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tau; --work; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); lquery = *lwork == -1; if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } else if (*lwork < 1 && ! lquery) { *info = -9; } if (*info == 0) { /* Determine the block size. */ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DSYTRD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { work[1] = 1.; return 0; } nx = *n; iws = 1; if (nb > 1 && nb < *n) { /* Determine when to cross over from blocked to unblocked code (last block is always handled by unblocked code). Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *n) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: determine the minimum value of NB, and reduce NB or force use of unblocked code by setting NX = N. 
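(Concretely, in the code below NB is first reduced to LWORK/LDWORK; if even that falls short of the minimum block size NBMIN reported by ILAENV( 2, 'DSYTRD', UPLO, N, -1, -1, -1 ), the blocked path is abandoned by setting NX = N, so that DSYTD2 processes the entire matrix.)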
Computing MAX */ i__1 = *lwork / ldwork; nb = max(i__1,1); nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (nb < nbmin) { nx = *n; } } } else { nx = *n; } } else { nb = 1; } if (upper) { /* Reduce the upper triangle of A. Columns 1:kk are handled by the unblocked method. */ kk = *n - (*n - nx + nb - 1) / nb * nb; i__1 = kk + 1; i__2 = -nb; for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Reduce columns i:i+nb-1 to tridiagonal form and form the matrix W which is needed to update the unreduced part of the matrix */ i__3 = i__ + nb - 1; dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], & work[1], &ldwork); /* Update the unreduced submatrix A(1:i-1,1:i-1), using an update of the form: A := A - V*W' - W*V' */ i__3 = i__ - 1; dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ * a_dim1 + 1], lda, &work[1], &ldwork, &c_b15, &a[a_offset], lda); /* Copy superdiagonal elements back into A, and diagonal elements into D */ i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j - 1 + j * a_dim1] = e[j - 1]; d__[j] = a[j + j * a_dim1]; /* L10: */ } /* L20: */ } /* Use unblocked code to reduce the last or only block */ dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo); } else { /* Reduce the lower triangle of A */ i__2 = *n - nx; i__1 = nb; for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Reduce columns i:i+nb-1 to tridiagonal form and form the matrix W which is needed to update the unreduced part of the matrix */ i__3 = *n - i__ + 1; dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], & tau[i__], &work[1], &ldwork); /* Update the unreduced submatrix A(i+ib:n,i+ib:n), using an update of the form: A := A - V*W' - W*V' */ i__3 = *n - i__ - nb + 1; dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ + nb + i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b15, &a[ i__ + nb + (i__ + nb) * a_dim1], lda); /* Copy subdiagonal elements back into A, and diagonal elements into D */ i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + 1 + j * a_dim1] = e[j]; d__[j] = a[j + j * a_dim1]; /* L30: */ } /* L40: */ } /* Use unblocked code to reduce the last or only block */ i__1 = *n - i__ + 1; dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &tau[i__], &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DSYTRD */ } /* dsytrd_ */ /* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select, integer *n, doublereal *t, integer *ldt, doublereal *vl, integer * ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m, doublereal *work, integer *info) { /* System generated locals */ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, i__2, i__3; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta, emax; static logical pair; extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static logical allv; static integer ierr; static doublereal unfl, ovfl, smin; static logical over; static doublereal vmax; static integer jnxt, i__, j, k; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal scale, x[4] /* was [2][2] */; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static doublereal remax; extern /* 
Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static logical leftv, bothv; extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static doublereal vcrit; static logical somev; static integer j1, j2, n2; static doublereal xnorm; extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal * , doublereal *, integer *, doublereal *, doublereal *, integer *), dlabad_(doublereal *, doublereal *); static integer ii, ki; static integer ip, is; static doublereal wi; extern integer idamax_(integer *, doublereal *, integer *); static doublereal wr; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum; static logical rightv; static doublereal smlnum, rec, ulp; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DTREVC computes some or all of the right and/or left eigenvectors of a real upper quasi-triangular matrix T. Matrices of this type are produced by the Schur factorization of a real general matrix: A = Q*T*Q**T, as computed by DHSEQR. The right eigenvector x and the left eigenvector y of T corresponding to an eigenvalue w are defined by: T*x = w*x, (y**H)*T = w*(y**H) where y**H denotes the conjugate transpose of y. The eigenvalues are not input to this routine, but are read directly from the diagonal blocks of T. This routine returns the matrices X and/or Y of right and left eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y are the matrices of right and left eigenvectors of A. Arguments ========= SIDE (input) CHARACTER*1 = 'R': compute right eigenvectors only; = 'L': compute left eigenvectors only; = 'B': compute both right and left eigenvectors. HOWMNY (input) CHARACTER*1 = 'A': compute all right and/or left eigenvectors; = 'B': compute all right and/or left eigenvectors, backtransformed by the matrices in VR and/or VL; = 'S': compute selected right and/or left eigenvectors, as indicated by the logical array SELECT. SELECT (input/output) LOGICAL array, dimension (N) If HOWMNY = 'S', SELECT specifies the eigenvectors to be computed. If w(j) is a real eigenvalue, the corresponding real eigenvector is computed if SELECT(j) is .TRUE.. If w(j) and w(j+1) are the real and imaginary parts of a complex eigenvalue, the corresponding complex eigenvector is computed if either SELECT(j) or SELECT(j+1) is .TRUE., and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to .FALSE.. Not referenced if HOWMNY = 'A' or 'B'. N (input) INTEGER The order of the matrix T. N >= 0. T (input) DOUBLE PRECISION array, dimension (LDT,N) The upper quasi-triangular matrix T in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must contain an N-by-N matrix Q (usually the orthogonal matrix Q of Schur vectors returned by DHSEQR). On exit, if SIDE = 'L' or 'B', VL contains: if HOWMNY = 'A', the matrix Y of left eigenvectors of T; if HOWMNY = 'B', the matrix Q*Y; if HOWMNY = 'S', the left eigenvectors of T specified by SELECT, stored consecutively in the columns of VL, in the same order as their eigenvalues. 
A complex eigenvector corresponding to a complex eigenvalue is stored in two consecutive columns, the first holding the real part, and the second the imaginary part. Not referenced if SIDE = 'R'. LDVL (input) INTEGER The leading dimension of the array VL. LDVL >= 1, and if SIDE = 'L' or 'B', LDVL >= N. VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must contain an N-by-N matrix Q (usually the orthogonal matrix Q of Schur vectors returned by DHSEQR). On exit, if SIDE = 'R' or 'B', VR contains: if HOWMNY = 'A', the matrix X of right eigenvectors of T; if HOWMNY = 'B', the matrix Q*X; if HOWMNY = 'S', the right eigenvectors of T specified by SELECT, stored consecutively in the columns of VR, in the same order as their eigenvalues. A complex eigenvector corresponding to a complex eigenvalue is stored in two consecutive columns, the first holding the real part and the second the imaginary part. Not referenced if SIDE = 'L'. LDVR (input) INTEGER The leading dimension of the array VR. LDVR >= 1, and if SIDE = 'R' or 'B', LDVR >= N. MM (input) INTEGER The number of columns in the arrays VL and/or VR. MM >= M. M (output) INTEGER The number of columns in the arrays VL and/or VR actually used to store the eigenvectors. If HOWMNY = 'A' or 'B', M is set to N. Each selected real eigenvector occupies one column and each selected complex eigenvector occupies two columns. WORK (workspace) DOUBLE PRECISION array, dimension (3*N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The algorithm used in this program is basically backward (forward) substitution, with scaling to make the the code robust against possible overflow. Each eigenvector is normalized so that the element of largest magnitude has magnitude 1; here the magnitude of a complex number (x,y) is taken to be |x| + |y|. ===================================================================== Decode and test the input parameters */ /* Parameter adjustments */ --select; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; vl_dim1 = *ldvl; vl_offset = 1 + vl_dim1 * 1; vl -= vl_offset; vr_dim1 = *ldvr; vr_offset = 1 + vr_dim1 * 1; vr -= vr_offset; --work; /* Function Body */ bothv = lsame_(side, "B"); rightv = lsame_(side, "R") || bothv; leftv = lsame_(side, "L") || bothv; allv = lsame_(howmny, "A"); over = lsame_(howmny, "B"); somev = lsame_(howmny, "S"); *info = 0; if (! rightv && ! leftv) { *info = -1; } else if (! allv && ! over && ! somev) { *info = -2; } else if (*n < 0) { *info = -4; } else if (*ldt < max(1,*n)) { *info = -6; } else if (*ldvl < 1 || leftv && *ldvl < *n) { *info = -8; } else if (*ldvr < 1 || rightv && *ldvr < *n) { *info = -10; } else { /* Set M to the number of columns required to store the selected eigenvectors, standardize the array SELECT if necessary, and test MM. */ if (somev) { *m = 0; pair = FALSE_; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (pair) { pair = FALSE_; select[j] = FALSE_; } else { if (j < *n) { if (t[j + 1 + j * t_dim1] == 0.) { if (select[j]) { ++(*m); } } else { pair = TRUE_; if (select[j] || select[j + 1]) { select[j] = TRUE_; *m += 2; } } } else { if (select[*n]) { ++(*m); } } } /* L10: */ } } else { *m = *n; } if (*mm < *m) { *info = -11; } } if (*info != 0) { i__1 = -(*info); xerbla_("DTREVC", &i__1); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* Set the constants to control overflow. */ unfl = SAFEMINIMUM; ovfl = 1. 
/ unfl; dlabad_(&unfl, &ovfl); ulp = PRECISION; smlnum = unfl * (*n / ulp); bignum = (1. - ulp) / smlnum; /* Compute 1-norm of each column of strictly upper triangular part of T to control overflow in triangular solver. */ work[1] = 0.; i__1 = *n; for (j = 2; j <= i__1; ++j) { work[j] = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1)); /* L20: */ } /* L30: */ } /* Index IP is used to specify the real or complex eigenvalue: IP = 0, real eigenvalue, 1, first of conjugate complex pair: (wr,wi) -1, second of conjugate complex pair: (wr,wi) */ n2 = *n << 1; if (rightv) { /* Compute right eigenvectors. */ ip = 0; is = *m; for (ki = *n; ki >= 1; --ki) { if (ip == 1) { goto L130; } if (ki == 1) { goto L40; } if (t[ki + (ki - 1) * t_dim1] == 0.) { goto L40; } ip = -1; L40: if (somev) { if (ip == 0) { if (! select[ki]) { goto L130; } } else { if (! select[ki - 1]) { goto L130; } } } /* Compute the KI-th eigenvalue (WR,WI). */ wr = t[ki + ki * t_dim1]; wi = 0.; if (ip != 0) { wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2))); } /* Computing MAX */ d__1 = ulp * (abs(wr) + abs(wi)); smin = max(d__1,smlnum); if (ip == 0) { /* Real right eigenvector */ work[ki + *n] = 1.; /* Form right-hand side */ i__1 = ki - 1; for (k = 1; k <= i__1; ++k) { work[k + *n] = -t[k + ki * t_dim1]; /* L50: */ } /* Solve the upper quasi-triangular system: (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK. */ jnxt = ki - 1; for (j = ki - 1; j >= 1; --j) { if (j > jnxt) { goto L60; } j1 = j; j2 = j; jnxt = j - 1; if (j > 1) { if (t[j + (j - 1) * t_dim1] != 0.) { j1 = j - 1; jnxt = j - 2; } } if (j1 == j2) { /* 1-by-1 diagonal block */ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale X(1,1) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { if (work[j] > bignum / xnorm) { x[0] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); } work[j + *n] = x[0]; /* Update right-hand side */ i__1 = j - 1; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); } else { /* 2-by-2 diagonal block */ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b15, &t[j - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & work[j - 1 + *n], n, &wr, &c_b29, x, &c__2, & scale, &xnorm, &ierr); /* Scale X(1,1) and X(2,1) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { /* Computing MAX */ d__1 = work[j - 1], d__2 = work[j]; beta = max(d__1,d__2); if (beta > bignum / xnorm) { x[0] /= xnorm; x[1] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); } work[j - 1 + *n] = x[0]; work[j + *n] = x[1]; /* Update right-hand side */ i__1 = j - 2; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[*n + 1], &c__1); i__1 = j - 2; d__1 = -x[1]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); } L60: ; } /* Copy the vector x or Q*x to VR and normalize. */ if (! over) { dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], & c__1); ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1); remax = 1. 
/ (d__1 = vr[ii + is * vr_dim1], abs(d__1)); dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); i__1 = *n; for (k = ki + 1; k <= i__1; ++k) { vr[k + is * vr_dim1] = 0.; /* L70: */ } } else { if (ki > 1) { i__1 = ki - 1; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[*n + 1], &c__1, &work[ki + *n], &vr[ki * vr_dim1 + 1], &c__1); } ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1); remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1)); dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); } } else { /* Complex right eigenvector. Initial solve [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0. [ (T(KI,KI-1) T(KI,KI) ) ] */ if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[ ki + (ki - 1) * t_dim1], abs(d__2))) { work[ki - 1 + *n] = 1.; work[ki + n2] = wi / t[ki - 1 + ki * t_dim1]; } else { work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1]; work[ki + n2] = 1.; } work[ki + *n] = 0.; work[ki - 1 + n2] = 0.; /* Form right-hand side */ i__1 = ki - 2; for (k = 1; k <= i__1; ++k) { work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) * t_dim1]; work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1]; /* L80: */ } /* Solve upper quasi-triangular system: (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2) */ jnxt = ki - 2; for (j = ki - 2; j >= 1; --j) { if (j > jnxt) { goto L90; } j1 = j; j2 = j; jnxt = j - 1; if (j > 1) { if (t[j + (j - 1) * t_dim1] != 0.) { j1 = j - 1; jnxt = j - 2; } } if (j1 == j2) { /* 1-by-1 diagonal block */ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &wi, x, &c__2, &scale, &xnorm, & ierr); /* Scale X(1,1) and X(1,2) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { if (work[j] > bignum / xnorm) { x[0] /= xnorm; x[2] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); dscal_(&ki, &scale, &work[n2 + 1], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; /* Update the right-hand side */ i__1 = j - 1; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); i__1 = j - 1; d__1 = -x[2]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ n2 + 1], &c__1); } else { /* 2-by-2 diagonal block */ dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b15, &t[j - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & work[j - 1 + *n], n, &wr, &wi, x, &c__2, & scale, &xnorm, &ierr); /* Scale X to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { /* Computing MAX */ d__1 = work[j - 1], d__2 = work[j]; beta = max(d__1,d__2); if (beta > bignum / xnorm) { rec = 1. / xnorm; x[0] *= rec; x[2] *= rec; x[1] *= rec; x[3] *= rec; scale *= rec; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); dscal_(&ki, &scale, &work[n2 + 1], &c__1); } work[j - 1 + *n] = x[0]; work[j + *n] = x[1]; work[j - 1 + n2] = x[2]; work[j + n2] = x[3]; /* Update the right-hand side */ i__1 = j - 2; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[*n + 1], &c__1); i__1 = j - 2; d__1 = -x[1]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); i__1 = j - 2; d__1 = -x[2]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[n2 + 1], &c__1); i__1 = j - 2; d__1 = -x[3]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ n2 + 1], &c__1); } L90: ; } /* Copy the vector x or Q*x to VR and normalize. */ if (! 
over) { dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1 + 1], &c__1); dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], & c__1); emax = 0.; i__1 = ki; for (k = 1; k <= i__1; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1] , abs(d__1)) + (d__2 = vr[k + is * vr_dim1], abs(d__2)); emax = max(d__3,d__4); /* L100: */ } remax = 1. / emax; dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1); dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); i__1 = *n; for (k = ki + 1; k <= i__1; ++k) { vr[k + (is - 1) * vr_dim1] = 0.; vr[k + is * vr_dim1] = 0.; /* L110: */ } } else { if (ki > 2) { i__1 = ki - 2; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[( ki - 1) * vr_dim1 + 1], &c__1); i__1 = ki - 2; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[n2 + 1], &c__1, &work[ki + n2], &vr[ki * vr_dim1 + 1], &c__1); } else { dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1 + 1], &c__1); dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], & c__1); } emax = 0.; i__1 = *n; for (k = 1; k <= i__1; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1] , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1], abs(d__2)); emax = max(d__3,d__4); /* L120: */ } remax = 1. / emax; dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1); dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); } } --is; if (ip != 0) { --is; } L130: if (ip == 1) { ip = 0; } if (ip == -1) { ip = 1; } /* L140: */ } } if (leftv) { /* Compute left eigenvectors. */ ip = 0; is = 1; i__1 = *n; for (ki = 1; ki <= i__1; ++ki) { if (ip == -1) { goto L250; } if (ki == *n) { goto L150; } if (t[ki + 1 + ki * t_dim1] == 0.) { goto L150; } ip = 1; L150: if (somev) { if (! select[ki]) { goto L250; } } /* Compute the KI-th eigenvalue (WR,WI). */ wr = t[ki + ki * t_dim1]; wi = 0.; if (ip != 0) { wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2))); } /* Computing MAX */ d__1 = ulp * (abs(wr) + abs(wi)); smin = max(d__1,smlnum); if (ip == 0) { /* Real left eigenvector. */ work[ki + *n] = 1.; /* Form right-hand side */ i__2 = *n; for (k = ki + 1; k <= i__2; ++k) { work[k + *n] = -t[ki + k * t_dim1]; /* L160: */ } /* Solve the quasi-triangular system: (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK */ vmax = 1.; vcrit = bignum; jnxt = ki + 1; i__2 = *n; for (j = ki + 1; j <= i__2; ++j) { if (j < jnxt) { goto L170; } j1 = j; j2 = j; jnxt = j + 1; if (j < *n) { if (t[j + 1 + j * t_dim1] != 0.) { j2 = j + 1; jnxt = j + 2; } } if (j1 == j2) { /* 1-by-1 diagonal block Scale if necessary to avoid overflow when forming the right-hand side. */ if (work[j] > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 1; work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); /* Solve (T(J,J)-WR)'*X = WORK */ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); } work[j + *n] = x[0]; /* Computing MAX */ d__2 = (d__1 = work[j + *n], abs(d__1)); vmax = max(d__2,vmax); vcrit = bignum / vmax; } else { /* 2-by-2 diagonal block Scale if necessary to avoid overflow when forming the right-hand side. 
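(Here BETA is the larger of the column norms WORK(J) and WORK(J+1) computed at entry; whenever BETA exceeds VCRIT, the partial solution, entries KI through N of the second block of WORK, is rescaled by 1/VMAX, after which VMAX and VCRIT are reset to 1 and BIGNUM.)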
Computing MAX */ d__1 = work[j], d__2 = work[j + 1]; beta = max(d__1,d__2); if (beta > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 1; work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); i__3 = j - ki - 1; work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); /* Solve [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 ) [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 ) */ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); } work[j + *n] = x[0]; work[j + 1 + *n] = x[1]; /* Computing MAX */ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 = work[j + 1 + *n], abs(d__2)), d__3 = max( d__3,d__4); vmax = max(d__3,vmax); vcrit = bignum / vmax; } L170: ; } /* Copy the vector x or Q*x to VL and normalize. */ if (! over) { i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1; remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1)); i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); i__2 = ki - 1; for (k = 1; k <= i__2; ++k) { vl[k + is * vl_dim1] = 0.; /* L180: */ } } else { if (ki < *n) { i__2 = *n - ki; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 1) * vl_dim1 + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[ ki + *n], &vl[ki * vl_dim1 + 1], &c__1); } ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1); remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1)); dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); } } else { /* Complex left eigenvector. Initial solve: ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0. ((T(KI+1,KI) T(KI+1,KI+1)) ) */ if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 = t[ki + 1 + ki * t_dim1], abs(d__2))) { work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1]; work[ki + 1 + n2] = 1.; } else { work[ki + *n] = 1.; work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1]; } work[ki + 1 + *n] = 0.; work[ki + n2] = 0.; /* Form right-hand side */ i__2 = *n; for (k = ki + 2; k <= i__2; ++k) { work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1]; work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1] ; /* L190: */ } /* Solve complex quasi-triangular system: ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2 */ vmax = 1.; vcrit = bignum; jnxt = ki + 2; i__2 = *n; for (j = ki + 2; j <= i__2; ++j) { if (j < jnxt) { goto L200; } j1 = j; j2 = j; jnxt = j + 1; if (j < *n) { if (t[j + 1 + j * t_dim1] != 0.) { j2 = j + 1; jnxt = j + 2; } } if (j1 == j2) { /* 1-by-1 diagonal block Scale if necessary to avoid overflow when forming the right-hand side elements. */ if (work[j] > vcrit) { rec = 1. 
/ vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + n2], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 2; work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); /* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */ d__1 = -wi; dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + n2], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; /* Computing MAX */ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 = work[j + n2], abs(d__2)), d__3 = max(d__3, d__4); vmax = max(d__3,vmax); vcrit = bignum / vmax; } else { /* 2-by-2 diagonal block Scale if necessary to avoid overflow when forming the right-hand side elements. Computing MAX */ d__1 = work[j], d__2 = work[j + 1]; beta = max(d__1,d__2); if (beta > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + n2], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 2; work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); i__3 = j - ki - 2; work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); /* Solve 2-by-2 complex linear equation ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B ([T(j+1,j) T(j+1,j+1)] ) */ d__1 = -wi; dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + n2], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; work[j + 1 + *n] = x[1]; work[j + 1 + n2] = x[3]; /* Computing MAX */ d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1, d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2) , d__2 = abs(x[3]), d__1 = max(d__1,d__2); vmax = max(d__1,vmax); vcrit = bignum / vmax; } L200: ; } /* Copy the vector x or Q*x to VL and normalize. */ if (! over) { i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) * vl_dim1], &c__1); emax = 0.; i__2 = *n; for (k = ki; k <= i__2; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs( d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1], abs(d__2)); emax = max(d__3,d__4); /* L220: */ } remax = 1. 
/ emax; i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1) ; i__2 = ki - 1; for (k = 1; k <= i__2; ++k) { vl[k + is * vl_dim1] = 0.; vl[k + (is + 1) * vl_dim1] = 0.; /* L230: */ } } else { if (ki < *n - 1) { i__2 = *n - ki - 1; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[ ki + *n], &vl[ki * vl_dim1 + 1], &c__1); i__2 = *n - ki - 1; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[ ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], & c__1); } else { dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], & c__1); dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &c__1); } emax = 0.; i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs( d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1], abs(d__2)); emax = max(d__3,d__4); /* L240: */ } remax = 1. / emax; dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1); } } ++is; if (ip != 0) { ++is; } L250: if (ip == -1) { ip = 0; } if (ip == 1) { ip = -1; } /* L260: */ } } return 0; /* End of DTREVC */ } /* dtrevc_ */ /* Subroutine */ int dtrexc_(char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info) { /* System generated locals */ integer q_dim1, q_offset, t_dim1, t_offset, i__1; /* Local variables */ static integer here; extern logical lsame_(char *, char *); static logical wantq; extern /* Subroutine */ int dlaexc_(logical *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer nbnext, nbf, nbl; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DTREXC reorders the real Schur factorization of a real matrix A = Q*T*Q**T, so that the diagonal block of T with row index IFST is moved to row ILST. The real Schur form T is reordered by an orthogonal similarity transformation Z**T*T*Z, and optionally the matrix Q of Schur vectors is updated by postmultiplying it with Z. T must be in Schur canonical form (as returned by DHSEQR), that is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block has its diagonal elements equal and its off-diagonal elements of opposite sign. Arguments ========= COMPQ (input) CHARACTER*1 = 'V': update the matrix Q of Schur vectors; = 'N': do not update Q. N (input) INTEGER The order of the matrix T. N >= 0. T (input/output) DOUBLE PRECISION array, dimension (LDT,N) On entry, the upper quasi-triangular matrix T, in Schur Schur canonical form. On exit, the reordered upper quasi-triangular matrix, again in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, if COMPQ = 'V', the matrix Q of Schur vectors. On exit, if COMPQ = 'V', Q has been postmultiplied by the orthogonal transformation matrix Z which reorders T. If COMPQ = 'N', Q is not referenced. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). IFST (input/output) INTEGER ILST (input/output) INTEGER Specify the reordering of the diagonal blocks of T. 
The block with row index IFST is moved to row ILST, by a sequence of transpositions between adjacent blocks. On exit, if IFST pointed on entry to the second row of a 2-by-2 block, it is changed to point to the first row; ILST always points to the first row of the block in its final position (which may differ from its input value by +1 or -1). 1 <= IFST <= N; 1 <= ILST <= N. WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value = 1: two adjacent blocks were too close to swap (the problem is very ill-conditioned); T may have been partially reordered, and ILST points to the first row of the current position of the block being moved. ===================================================================== Decode and test the input arguments. */ /* Parameter adjustments */ t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --work; /* Function Body */ *info = 0; wantq = lsame_(compq, "V"); if (! wantq && ! lsame_(compq, "N")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldt < max(1,*n)) { *info = -4; } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { *info = -6; } else if (*ifst < 1 || *ifst > *n) { *info = -7; } else if (*ilst < 1 || *ilst > *n) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DTREXC", &i__1); return 0; } /* Quick return if possible */ if (*n <= 1) { return 0; } /* Determine the first row of specified block and find out it is 1 by 1 or 2 by 2. */ if (*ifst > 1) { if (t[*ifst + (*ifst - 1) * t_dim1] != 0.) { --(*ifst); } } nbf = 1; if (*ifst < *n) { if (t[*ifst + 1 + *ifst * t_dim1] != 0.) { nbf = 2; } } /* Determine the first row of the final block and find out it is 1 by 1 or 2 by 2. */ if (*ilst > 1) { if (t[*ilst + (*ilst - 1) * t_dim1] != 0.) { --(*ilst); } } nbl = 1; if (*ilst < *n) { if (t[*ilst + 1 + *ilst * t_dim1] != 0.) { nbl = 2; } } if (*ifst == *ilst) { return 0; } if (*ifst < *ilst) { /* Update ILST */ if (nbf == 2 && nbl == 1) { --(*ilst); } if (nbf == 1 && nbl == 2) { ++(*ilst); } here = *ifst; L10: /* Swap block with next one below */ if (nbf == 1 || nbf == 2) { /* Current block either 1 by 1 or 2 by 2 */ nbnext = 1; if (here + nbf + 1 <= *n) { if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.) { nbnext = 2; } } dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, & nbf, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += nbnext; /* Test if 2 by 2 block breaks into two 1 by 1 blocks */ if (nbf == 2) { if (t[here + 1 + here * t_dim1] == 0.) { nbf = 3; } } } else { /* Current block consists of two 1 by 1 blocks each of which must be swapped individually */ nbnext = 1; if (here + 3 <= *n) { if (t[here + 3 + (here + 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here + 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & c__1, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } if (nbnext == 1) { /* Swap two 1 by 1 blocks, no problems possible */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &nbnext, &work[1], info); ++here; } else { /* Recompute NBNEXT in case 2 by 2 split */ if (t[here + 2 + (here + 1) * t_dim1] == 0.) 
{ nbnext = 1; } if (nbnext == 2) { /* 2 by 2 Block did not split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += 2; } else { /* 2 by 2 Block did split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &c__1, &work[1], info); i__1 = here + 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__1, &c__1, &work[1], info); here += 2; } } } if (here < *ilst) { goto L10; } } else { here = *ifst; L20: /* Swap block with next one above */ if (nbf == 1 || nbf == 2) { /* Current block either 1 by 1 or 2 by 2 */ nbnext = 1; if (here >= 3) { if (t[here - 1 + (here - 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here - nbnext; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &nbf, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here -= nbnext; /* Test if 2 by 2 block breaks into two 1 by 1 blocks */ if (nbf == 2) { if (t[here + 1 + here * t_dim1] == 0.) { nbf = 3; } } } else { /* Current block consists of two 1 by 1 blocks each of which must be swapped individually */ nbnext = 1; if (here >= 3) { if (t[here - 1 + (here - 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here - nbnext; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &c__1, &work[1], info); if (*info != 0) { *ilst = here; return 0; } if (nbnext == 1) { /* Swap two 1 by 1 blocks, no problems possible */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &nbnext, &c__1, &work[1], info); --here; } else { /* Recompute NBNEXT in case 2 by 2 split */ if (t[here + (here - 1) * t_dim1] == 0.) { nbnext = 1; } if (nbnext == 2) { /* 2 by 2 Block did not split */ i__1 = here - 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__2, &c__1, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += -2; } else { /* 2 by 2 Block did split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &c__1, &work[1], info); i__1 = here - 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__1, &c__1, &work[1], info); here += -2; } } } if (here > *ilst) { goto L20; } } *ilst = here; return 0; /* End of DTREXC */ } /* dtrexc_ */ integer ieeeck_(integer *ispec, real *zero, real *one) { /* System generated locals */ integer ret_val; /* Local variables */ static real neginf, posinf, negzro, newzro, nan1, nan2, nan3, nan4, nan5, nan6; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= IEEECK is called from ILAENV to verify that Infinity and possibly NaN arithmetic is safe (i.e. will not trap). Arguments ========= ISPEC (input) INTEGER Specifies whether to test just for infinity arithmetic or whether to test for infinity and NaN arithmetic. = 0: Verify infinity arithmetic only. = 1: Verify infinity and NaN arithmetic. ZERO (input) REAL Must contain the value 0.0 This is passed to prevent the compiler from optimizing away this code. ONE (input) REAL Must contain the value 1.0 This is passed to prevent the compiler from optimizing away this code.
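As an illustration only (the variable names are placeholders, not part of this routine's interface): a caller such as ILAENV might invoke the full check as

      integer ispec = 1, ok;
      real zero = 0.f, one = 1.f;
      ok = ieeeck_(&ispec, &zero, &one);

receiving ok = 1 when the arithmetic is safe, as defined next.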
RETURN VALUE: INTEGER = 0: Arithmetic failed to produce the correct answers = 1: Arithmetic produced the correct answers */ ret_val = 1; posinf = *one / *zero; if (posinf <= *one) { ret_val = 0; return ret_val; } neginf = -(*one) / *zero; if (neginf >= *zero) { ret_val = 0; return ret_val; } negzro = *one / (neginf + *one); if (negzro != *zero) { ret_val = 0; return ret_val; } neginf = *one / negzro; if (neginf >= *zero) { ret_val = 0; return ret_val; } newzro = negzro + *zero; if (newzro != *zero) { ret_val = 0; return ret_val; } posinf = *one / newzro; if (posinf <= *one) { ret_val = 0; return ret_val; } neginf *= posinf; if (neginf >= *zero) { ret_val = 0; return ret_val; } posinf *= posinf; if (posinf <= *one) { ret_val = 0; return ret_val; } /* Return if we were only asked to check infinity arithmetic */ if (*ispec == 0) { return ret_val; } nan1 = posinf + neginf; nan2 = posinf / neginf; nan3 = posinf / posinf; nan4 = posinf * *zero; nan5 = neginf * negzro; nan6 = nan5 * 0.f; if (nan1 == nan1) { ret_val = 0; return ret_val; } if (nan2 == nan2) { ret_val = 0; return ret_val; } if (nan3 == nan3) { ret_val = 0; return ret_val; } if (nan4 == nan4) { ret_val = 0; return ret_val; } if (nan5 == nan5) { ret_val = 0; return ret_val; } if (nan6 == nan6) { ret_val = 0; return ret_val; } return ret_val; } /* ieeeck_ */ integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1, integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen opts_len) { /* System generated locals */ integer ret_val; /* Builtin functions */ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen); integer s_cmp(char *, char *, ftnlen, ftnlen); /* Local variables */ static integer i__; static logical cname; static integer nbmin; static logical sname; static char c1[1], c2[2], c3[3], c4[2]; static integer ic, nb; extern integer ieeeck_(integer *, real *, real *); static integer iz, nx; static char subnam[6]; extern integer iparmq_(integer *, char *, char *, integer *, integer *, integer *, integer *); /* -- LAPACK auxiliary routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. January 2007 Purpose ======= ILAENV is called from the LAPACK routines to choose problem-dependent parameters for the local environment. See ISPEC for a description of the parameters. ILAENV returns an INTEGER if ILAENV >= 0: ILAENV returns the value of the parameter specified by ISPEC if ILAENV < 0: if ILAENV = -k, the k-th argument had an illegal value. This version provides a set of parameters which should give good, but not optimal, performance on many of the currently available computers. Users are encouraged to modify this subroutine to set the tuning parameters for their particular machine using the option and problem size information in the arguments. This routine will not function correctly if it is converted to all lower case. Converting it to all upper case is allowed. Arguments ========= ISPEC (input) INTEGER Specifies the parameter to be returned as the value of ILAENV. = 1: the optimal blocksize; if this value is 1, an unblocked algorithm will give the best performance. = 2: the minimum block size for which the block routine should be used; if the usable block size is less than this value, an unblocked routine should be used. 
= 3: the crossover point (in a block routine, for N less than this value, an unblocked routine should be used) = 4: the number of shifts, used in the nonsymmetric eigenvalue routines (DEPRECATED) = 5: the minimum column dimension for blocking to be used; rectangular blocks must have dimension at least k by m, where k is given by ILAENV(2,...) and m by ILAENV(5,...) = 6: the crossover point for the SVD (when reducing an m by n matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds this value, a QR factorization is used first to reduce the matrix to a triangular form.) = 7: the number of processors = 8: the crossover point for the multishift QR method for nonsymmetric eigenvalue problems (DEPRECATED) = 9: maximum size of the subproblems at the bottom of the computation tree in the divide-and-conquer algorithm (used by xGELSD and xGESDD) =10: ieee NaN arithmetic can be trusted not to trap =11: infinity arithmetic can be trusted not to trap 12 <= ISPEC <= 16: xHSEQR or one of its subroutines, see IPARMQ for detailed explanation NAME (input) CHARACTER*(*) The name of the calling subroutine, in either upper case or lower case. OPTS (input) CHARACTER*(*) The character options to the subroutine NAME, concatenated into a single character string. For example, UPLO = 'U', TRANS = 'T', and DIAG = 'N' for a triangular routine would be specified as OPTS = 'UTN'. N1 (input) INTEGER N2 (input) INTEGER N3 (input) INTEGER N4 (input) INTEGER Problem dimensions for the subroutine NAME; these may not all be required. Further Details =============== The following conventions have been used when calling ILAENV from the LAPACK routines: 1) OPTS is a concatenation of all of the character options to subroutine NAME, in the same order that they appear in the argument list for NAME, even if they are not used in determining the value of the parameter specified by ISPEC. 2) The problem dimensions N1, N2, N3, N4 are specified in the order that they appear in the argument list for NAME. N1 is used first, N2 second, and so on, and unused problem dimensions are passed a value of -1. 3) The parameter value returned by ILAENV is checked for validity in the calling subroutine. For example, ILAENV is used to retrieve the optimal blocksize for STRTRI as follows: NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 ) IF( NB.LE.1 ) NB = MAX( 1, N ) ===================================================================== */ switch (*ispec) { case 1: goto L10; case 2: goto L10; case 3: goto L10; case 4: goto L80; case 5: goto L90; case 6: goto L100; case 7: goto L110; case 8: goto L120; case 9: goto L130; case 10: goto L140; case 11: goto L150; case 12: goto L160; case 13: goto L160; case 14: goto L160; case 15: goto L160; case 16: goto L160; } /* Invalid value for ISPEC */ ret_val = -1; return ret_val; L10: /* Convert NAME to upper case if the first character is lower case. 
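The character set is inferred from the integer code of 'Z': 90 or 122 indicates ASCII, 233 or 169 indicates EBCDIC, and 218 or 250 indicates Prime machines (ASCII+128); lower-case letters in SUBNAM are then shifted to upper case with the offset proper to that set (subtracting 32 for ASCII and Prime, adding 64 for EBCDIC).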
*/ ret_val = 1; s_copy(subnam, name__, (ftnlen)6, name_len); ic = *(unsigned char *)subnam; iz = 'Z'; if (iz == 90 || iz == 122) { /* ASCII character set */ if (ic >= 97 && ic <= 122) { *(unsigned char *)subnam = (char) (ic - 32); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 97 && ic <= 122) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); } /* L20: */ } } } else if (iz == 233 || iz == 169) { /* EBCDIC character set */ if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) { *(unsigned char *)subnam = (char) (ic + 64); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64); } /* L30: */ } } } else if (iz == 218 || iz == 250) { /* Prime machines: ASCII+128 */ if (ic >= 225 && ic <= 250) { *(unsigned char *)subnam = (char) (ic - 32); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 225 && ic <= 250) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); } /* L40: */ } } } *(unsigned char *)c1 = *(unsigned char *)subnam; sname = *(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D'; cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z'; if (! (cname || sname)) { return ret_val; } s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2); s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3); s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2); switch (*ispec) { case 1: goto L50; case 2: goto L60; case 3: goto L70; } L50: /* ISPEC = 1: block size In these examples, separate code is provided for setting NB for real and complex. We assume that NB will take the same value in single or double precision. */ nb = 1; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } else if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen) 3, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nb = 32; } else if (sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nb = 32; } else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, 
(ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { if (*n4 <= 64) { nb = 1; } else { nb = 32; } } else { if (*n4 <= 64) { nb = 1; } else { nb = 32; } } } } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { if (*n2 <= 64) { nb = 1; } else { nb = 32; } } else { if (*n2 <= 64) { nb = 1; } else { nb = 32; } } } } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) { nb = 1; } } ret_val = nb; return ret_val; L60: /* ISPEC = 2: minimum block size */ nbmin = 2; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", ( ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 8; } else { nbmin = 8; } } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nbmin = 2; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nbmin = 2; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || 
s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } } ret_val = nbmin; return ret_val; L70: /* ISPEC = 3: crossover point */ nx = 0; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", ( ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nx = 32; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nx = 32; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nx = 128; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nx = 128; } } } ret_val = nx; return ret_val; L80: /* ISPEC = 4: number of shifts (used by xHSEQR) */ ret_val = 6; return ret_val; L90: /* ISPEC = 5: minimum column dimension (not used) */ ret_val = 2; return ret_val; L100: /* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */ ret_val = (integer) ((real) min(*n1,*n2) * 1.6f); return ret_val; L110: /* ISPEC = 7: number of processors (not used) */ ret_val = 1; return ret_val; L120: /* ISPEC = 8: crossover point for multishift (used by xHSEQR) */ ret_val = 50; 
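    /* Note: ISPEC = 8 is documented above as DEPRECATED; 50 is the
       long-standing LAPACK default for this crossover. */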
return ret_val; L130: /* ISPEC = 9: maximum size of the subproblems at the bottom of the computation tree in the divide-and-conquer algorithm (used by xGELSD and xGESDD) */ ret_val = 25; return ret_val; L140: /* ISPEC = 10: ieee NaN arithmetic can be trusted not to trap ILAENV = 0 */ ret_val = 1; if (ret_val == 1) { ret_val = ieeeck_(&c__0, &c_b4270, &c_b4271); } return ret_val; L150: /* ISPEC = 11: infinity arithmetic can be trusted not to trap ILAENV = 0 */ ret_val = 1; if (ret_val == 1) { ret_val = ieeeck_(&c__1, &c_b4270, &c_b4271); } return ret_val; L160: /* 12 <= ISPEC <= 16: xHSEQR or one of its subroutines. */ /*** FFF MODIF ***/ /*** f2c generated code ***/ /* ret_val = iparmq_(ispec, name__, opts, n1, n2, n3, n4, name_len, opts_len) ;*/ ret_val = iparmq_(ispec, name__, opts, n1, n2, n3, n4); return ret_val; /* End of ILAENV */ } /* ilaenv_ */ integer iparmq_(integer *ispec, char *name__, char *opts, integer *n, integer *ilo, integer *ihi, integer *lwork) { /* System generated locals */ integer ret_val, i__1, i__2; real r__1; /* Builtin functions */ double log(doublereal); integer i_nint(real *); /* Local variables */ static integer nh, ns; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This program sets problem and machine dependent parameters useful for xHSEQR and its subroutines. It is called whenever ILAENV is called with 12 <= ISPEC <= 16 Arguments ========= ISPEC (input) integer scalar ISPEC specifies which tunable parameter IPARMQ should return. ISPEC=12: (INMIN) Matrices of order nmin or less are sent directly to xLAHQR, the implicit double shift QR algorithm. NMIN must be at least 11. ISPEC=13: (INWIN) Size of the deflation window. This is best set greater than or equal to the number of simultaneous shifts NS. Larger matrices benefit from larger deflation windows. ISPEC=14: (INIBL) Determines when to stop nibbling and invest in an (expensive) multi-shift QR sweep. If the aggressive early deflation subroutine finds LD converged eigenvalues from an order NW deflation window and LD.GT.(NW*NIBBLE)/100, then the next QR sweep is skipped and early deflation is applied immediately to the remaining active diagonal block. Setting IPARMQ(ISPEC=14) = 0 causes TTQRE to skip a multi-shift QR sweep whenever early deflation finds a converged eigenvalue. Setting IPARMQ(ISPEC=14) greater than or equal to 100 prevents TTQRE from skipping a multi-shift QR sweep. ISPEC=15: (NSHFTS) The number of simultaneous shifts in a multi-shift QR iteration. ISPEC=16: (IACC22) IPARMQ is set to 0, 1 or 2 with the following meanings. 0: During the multi-shift QR sweep, xLAQR5 does not accumulate reflections and does not use matrix-matrix multiply to update the far-from-diagonal matrix entries. 1: During the multi-shift QR sweep, xLAQR5 and/or xLAQRaccumulates reflections and uses matrix-matrix multiply to update the far-from-diagonal matrix entries. 2: During the multi-shift QR sweep. xLAQR5 accumulates reflections and takes advantage of 2-by-2 block structure during matrix-matrix multiplies. (If xTRMM is slower than xGEMM, then IPARMQ(ISPEC=16)=1 may be more efficient than IPARMQ(ISPEC=16)=2 despite the greater level of arithmetic work implied by the latter choice.) NAME (input) character string Name of the calling subroutine OPTS (input) character string This is a concatenation of the string arguments to TTQRE. N (input) integer scalar N is the order of the Hessenberg matrix H. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. LWORK (input) integer scalar The amount of workspace available. Further Details =============== Little is known about how best to choose these parameters. It is possible to use different values of the parameters for each of CHSEQR, DHSEQR, SHSEQR and ZHSEQR. It is probably best to choose different parameters for different matrices and different parameters at different times during the iteration, but this has not been implemented --- yet. The best choices of most of the parameters depend in an ill-understood way on the relative execution rate of xLAQR3 and xLAQR5 and on the nature of each particular eigenvalue problem. Experiment may be the only practical way to determine which choices are most effective. Following is a list of default values supplied by IPARMQ. These defaults may be adjusted in order to attain better performance in any particular computational environment. IPARMQ(ISPEC=12) The xLAHQR vs xLAQR0 crossover point. Default: 75. (Must be at least 11.) IPARMQ(ISPEC=13) Recommended deflation window size. This depends on ILO, IHI and NS, the number of simultaneous shifts returned by IPARMQ(ISPEC=15). The default for (IHI-ILO+1).LE.500 is NS. The default for (IHI-ILO+1).GT.500 is 3*NS/2. IPARMQ(ISPEC=14) Nibble crossover point. Default: 14. IPARMQ(ISPEC=15) Number of simultaneous shifts, NS. a multi-shift QR iteration. If IHI-ILO+1 is ... greater than ...but less ... the or equal to ... than default is 0 30 NS = 2+ 30 60 NS = 4+ 60 150 NS = 10 150 590 NS = ** 590 3000 NS = 64 3000 6000 NS = 128 6000 infinity NS = 256 (+) By default matrices of this order are passed to the implicit double shift routine xLAHQR. See IPARMQ(ISPEC=12) above. These values of NS are used only in case of a rare xLAHQR failure. (**) The asterisks (**) indicate an ad-hoc function increasing from 10 to 64. IPARMQ(ISPEC=16) Select structured matrix multiply. (See ISPEC=16 above for details.) Default: 3. ================================================================ */ if (*ispec == 15 || *ispec == 13 || *ispec == 16) { /* ==== Set the number simultaneous shifts ==== */ nh = *ihi - *ilo + 1; ns = 2; if (nh >= 30) { ns = 4; } if (nh >= 60) { ns = 10; } if (nh >= 150) { /* Computing MAX */ r__1 = log((real) nh) / log(2.f); i__1 = 10, i__2 = nh / i_nint(&r__1); ns = max(i__1,i__2); } if (nh >= 590) { ns = 64; } if (nh >= 3000) { ns = 128; } if (nh >= 6000) { ns = 256; } /* Computing MAX */ i__1 = 2, i__2 = ns - ns % 2; ns = max(i__1,i__2); } if (*ispec == 12) { /* ===== Matrices of order smaller than NMIN get sent . to xLAHQR, the classic double shift algorithm. . This must be at least 11. ==== */ ret_val = 75; } else if (*ispec == 14) { /* ==== INIBL: skip a multi-shift qr iteration and . whenever aggressive early deflation finds . at least (NIBBLE*(window size)/100) deflations. ==== */ ret_val = 14; } else if (*ispec == 15) { /* ==== NSHFTS: The number of simultaneous shifts ===== */ ret_val = ns; } else if (*ispec == 13) { /* ==== NW: deflation window size. ==== */ if (nh <= 500) { ret_val = ns; } else { ret_val = ns * 3 / 2; } } else if (*ispec == 16) { /* ==== IACC22: Whether to accumulate reflections . before updating the far-from-diagonal elements . and whether to use 2-by-2 block structure while . doing it. A small amount of work could be saved . by making this choice dependent also upon the . NH=IHI-ILO+1. 
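   .             (Given the thresholds applied in the code below, any
   .             NS >= 14 selects the 2-by-2 block-structured update,
   .             i.e. IACC22 = 2.)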
*/ ret_val = 0; if (ns >= 14) { ret_val = 1; } if (ns >= 14) { ret_val = 2; } } else { /* ===== invalid value of ispec ===== */ ret_val = -1; } /* ==== End of IPARMQ ==== */ return ret_val; } /* iparmq_ */ nipy-0.6.1/lib/lapack_lite/f2c.h000066400000000000000000000102641470056100100163330ustar00rootroot00000000000000/* f2c.h -- Standard Fortran to C header file */ /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ #ifndef F2C_INCLUDE #define F2C_INCLUDE typedef int integer; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ #ifdef f2c_i2 /* for -i2 */ typedef short flag; typedef short ftnlen; typedef short ftnint; #else typedef int flag; typedef int ftnlen; typedef int ftnint; #endif /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ shortint h; integer i; real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; typedef long Long; /* No longer used; formerly in Namelist */ struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; #ifndef abs #define abs(x) ((x) >= 0 ? (x) : -(x)) #endif #define dabs(x) (doublereal)abs(x) #ifndef min #define min(a,b) ((a) <= (b) ? (a) : (b)) #endif #ifndef max #define max(a,b) ((a) >= (b) ? 
(a) : (b)) #endif #define dmin(a,b) (doublereal)min(a,b) #define dmax(a,b) (doublereal)max(a,b) /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef int /* Unknown procedure type */ (*U_fp)(...); typedef shortint (*J_fp)(...); typedef integer (*I_fp)(...); typedef real (*R_fp)(...); typedef doublereal (*D_fp)(...), (*E_fp)(...); typedef /* Complex */ VOID (*C_fp)(...); typedef /* Double Complex */ VOID (*Z_fp)(...); typedef logical (*L_fp)(...); typedef shortlogical (*K_fp)(...); typedef /* Character */ VOID (*H_fp)(...); typedef /* Subroutine */ int (*S_fp)(...); #else typedef int /* Unknown procedure type */ (*U_fp)(void); typedef shortint (*J_fp)(void); typedef integer (*I_fp)(void); typedef real (*R_fp)(void); typedef doublereal (*D_fp)(void), (*E_fp)(void); typedef /* Complex */ VOID (*C_fp)(void); typedef /* Double Complex */ VOID (*Z_fp)(void); typedef logical (*L_fp)(void); typedef shortlogical (*K_fp)(void); typedef /* Character */ VOID (*H_fp)(void); typedef /* Subroutine */ int (*S_fp)(void); #endif /* E_fp is for real functions when -R is not specified */ typedef VOID C_f; /* complex function */ typedef VOID H_f; /* character function */ typedef VOID Z_f; /* double complex function */ typedef doublereal E_f; /* real function with -R not specified */ /* undef any lower-case symbols that your C compiler predefines, e.g.: */ #ifndef Skip_f2c_Undefs #undef cray #undef gcos #undef mc68010 #undef mc68020 #undef mips #undef pdp11 #undef sgi #undef sparc #undef sun #undef sun2 #undef sun3 #undef sun4 #undef u370 #undef u3b #undef u3b2 #undef u3b5 #undef unix #undef vax #endif #endif nipy-0.6.1/lib/lapack_lite/f2c_lite.c000066400000000000000000000200731470056100100173420ustar00rootroot00000000000000#include #include #include #include #include "f2c.h" extern void s_wsfe(cilist *f) {;} extern void e_wsfe(void) {;} extern void do_fio(integer *c, char *s, ftnlen l) {;} /* You'll want this if you redo the *_lite.c files with the -C option * to f2c for checking array subscripts. (It's not suggested you do that * for production use, of course.) */ extern int s_rnge(char *var, int index, char *routine, int lineno) { fprintf(stderr, "array index out-of-bounds for %s[%d] in routine %s:%d\n", var, index, routine, lineno); fflush(stderr); abort(); } #ifdef KR_headers extern double sqrt(); double f__cabs(real, imag) double real, imag; #else #undef abs double f__cabs(double real, double imag) #endif { double temp; if(real < 0) real = -real; if(imag < 0) imag = -imag; if(imag > real){ temp = real; real = imag; imag = temp; } if((imag+real) == real) return((double)real); temp = imag/real; temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/ return(temp); } VOID #ifdef KR_headers d_cnjg(r, z) doublecomplex *r, *z; #else d_cnjg(doublecomplex *r, doublecomplex *z) #endif { r->r = z->r; r->i = - z->i; } #ifdef KR_headers double d_imag(z) doublecomplex *z; #else double d_imag(doublecomplex *z) #endif { return(z->i); } #define log10e 0.43429448190325182765 #ifdef KR_headers double log(); double d_lg10(x) doublereal *x; #else #undef abs double d_lg10(doublereal *x) #endif { return( log10e * log(*x) ); } #ifdef KR_headers double d_sign(a,b) doublereal *a, *b; #else double d_sign(doublereal *a, doublereal *b) #endif { double x; x = (*a >= 0 ? *a : - *a); return( *b >= 0 ? x : -x); } #ifdef KR_headers double floor(); integer i_dnnt(x) doublereal *x; #else #undef abs integer i_dnnt(doublereal *x) #endif { return( (*x)>=0 ? 
floor(*x + .5) : -floor(.5 - *x) ); } /* Additions to the original numpy code for compliency with Lapack 3-1-1 */ #ifdef KR_headers double floor(); double d_nint(x) doublereal *x; #else #undef abs double d_nint(doublereal *x) #endif { return( (*x)>=0 ? floor(*x + .5) : -floor(.5 - *x) ); } #ifdef KR_headers double floor(); integer i_nint(x) real *x; #else #undef abs integer i_nint(real *x) #endif { return (integer)(*x >= 0 ? floor(*x + .5) : -floor(.5 - *x)); } /* End of additions */ #ifdef KR_headers double pow(); double pow_dd(ap, bp) doublereal *ap, *bp; #else #undef abs double pow_dd(doublereal *ap, doublereal *bp) #endif { return(pow(*ap, *bp) ); } #ifdef KR_headers double pow_di(ap, bp) doublereal *ap; integer *bp; #else double pow_di(doublereal *ap, integer *bp) #endif { double pow, x; integer n; unsigned long u; pow = 1; x = *ap; n = *bp; if(n != 0) { if(n < 0) { n = -n; x = 1/x; } for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return(pow); } /* Unless compiled with -DNO_OVERWRITE, this variant of s_cat allows the * target of a concatenation to appear on its right-hand side (contrary * to the Fortran 77 Standard, but in accordance with Fortran 90). */ #define NO_OVERWRITE #ifndef NO_OVERWRITE #undef abs #ifdef KR_headers extern char *F77_aloc(); extern void free(); extern void exit_(); #else extern char *F77_aloc(ftnlen, char*); #endif #endif /* NO_OVERWRITE */ VOID #ifdef KR_headers s_cat(lp, rpp, rnp, np, ll) char *lp, *rpp[]; ftnlen rnp[], *np, ll; #else s_cat(char *lp, char *rpp[], ftnlen rnp[], ftnlen *np, ftnlen ll) #endif { ftnlen i, nc; char *rp; ftnlen n = *np; #ifndef NO_OVERWRITE ftnlen L, m; char *lp0, *lp1; lp0 = 0; lp1 = lp; L = ll; i = 0; while(i < n) { rp = rpp[i]; m = rnp[i++]; if (rp >= lp1 || rp + m <= lp) { if ((L -= m) <= 0) { n = i; break; } lp1 += m; continue; } lp0 = lp; lp = lp1 = F77_aloc(L = ll, "s_cat"); break; } lp1 = lp; #endif /* NO_OVERWRITE */ for(i = 0 ; i < n ; ++i) { nc = ll; if(rnp[i] < nc) nc = rnp[i]; ll -= nc; rp = rpp[i]; while(--nc >= 0) *lp++ = *rp++; } while(--ll >= 0) *lp++ = ' '; #ifndef NO_OVERWRITE if (lp0) { memmove(lp0, lp1, L); free(lp1); } #endif } /* compare two strings */ #ifdef KR_headers integer s_cmp(a0, b0, la, lb) char *a0, *b0; ftnlen la, lb; #else integer s_cmp(char *a0, char *b0, ftnlen la, ftnlen lb) #endif { register unsigned char *a, *aend, *b, *bend; a = (unsigned char *)a0; b = (unsigned char *)b0; aend = a + la; bend = b + lb; if(la <= lb) { while(a < aend) if(*a != *b) return( *a - *b ); else { ++a; ++b; } while(b < bend) if(*b != ' ') return( ' ' - *b ); else ++b; } else { while(b < bend) if(*a == *b) { ++a; ++b; } else return( *a - *b ); while(a < aend) if(*a != ' ') return(*a - ' '); else ++a; } return(0); } /* Unless compiled with -DNO_OVERWRITE, this variant of s_copy allows the * target of an assignment to appear on its right-hand side (contrary * to the Fortran 77 Standard, but in accordance with Fortran 90), * as in a(2:5) = a(4:7) . 
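 * Note, however, that NO_OVERWRITE is defined earlier in this file, so
 * the overlap-safe branches guarded by #ifndef NO_OVERWRITE are compiled
 * out, leaving a plain forward copy.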
*/ /* assign strings: a = b */ #ifdef KR_headers VOID s_copy(a, b, la, lb) register char *a, *b; ftnlen la, lb; #else void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb) #endif { register char *aend, *bend; aend = a + la; if(la <= lb) #ifndef NO_OVERWRITE if (a <= b || a >= b + la) #endif while(a < aend) *a++ = *b++; #ifndef NO_OVERWRITE else for(b += la; a < aend; ) *--aend = *--b; #endif else { bend = b + lb; #ifndef NO_OVERWRITE if (a <= b || a >= bend) #endif while(b < bend) *a++ = *b++; #ifndef NO_OVERWRITE else { a += lb; while(b < bend) *--a = *--bend; a += lb; } #endif while(a < aend) *a++ = ' '; } } #ifdef KR_headers double f__cabs(); double z_abs(z) doublecomplex *z; #else double f__cabs(double, double); double z_abs(doublecomplex *z) #endif { return( f__cabs( z->r, z->i ) ); } #ifdef KR_headers extern void sig_die(); VOID z_div(c, a, b) doublecomplex *a, *b, *c; #else extern void sig_die(char*, int); void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b) #endif { double ratio, den; double abr, abi; if( (abr = b->r) < 0.) abr = - abr; if( (abi = b->i) < 0.) abi = - abi; if( abr <= abi ) { /*Let IEEE Infinties handle this ;( */ /*if(abi == 0) sig_die("complex division by zero", 1);*/ ratio = b->r / b->i ; den = b->i * (1 + ratio*ratio); c->r = (a->r*ratio + a->i) / den; c->i = (a->i*ratio - a->r) / den; } else { ratio = b->i / b->r ; den = b->r * (1 + ratio*ratio); c->r = (a->r + a->i*ratio) / den; c->i = (a->i - a->r*ratio) / den; } } #ifdef KR_headers double sqrt(), f__cabs(); VOID z_sqrt(r, z) doublecomplex *r, *z; #else #undef abs extern double f__cabs(double, double); void z_sqrt(doublecomplex *r, doublecomplex *z) #endif { double mag; if( (mag = f__cabs(z->r, z->i)) == 0.) r->r = r->i = 0.; else if(z->r > 0) { r->r = sqrt(0.5 * (mag + z->r) ); r->i = z->i / r->r / 2; } else { r->i = sqrt(0.5 * (mag - z->r) ); if(z->i < 0) r->i = - r->i; r->r = z->i / r->i / 2; } } #ifdef __cplusplus extern "C" { #endif #ifdef KR_headers integer pow_ii(ap, bp) integer *ap, *bp; #else integer pow_ii(integer *ap, integer *bp) #endif { integer pow, x, n; unsigned long u; x = *ap; n = *bp; if (n <= 0) { if (n == 0 || x == 1) return 1; if (x != -1) return x == 0 ? 1/x : 0; n = -n; } u = n; for(pow = 1; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } return(pow); } #ifdef __cplusplus } #endif #ifdef KR_headers extern void f_exit(); VOID s_stop(s, n) char *s; ftnlen n; #else #undef abs #undef min #undef max #ifdef __cplusplus extern "C" { #endif #ifdef __cplusplus extern "C" { #endif void f_exit(void); int s_stop(char *s, ftnlen n) #endif { int i; if(n > 0) { fprintf(stderr, "STOP "); for(i = 0; i= 1.1.1', default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', 'c_std=c17', ], ) cc = meson.get_compiler('c') # Check compiler is recent enough (see "Toolchain Roadmap" for details) if cc.get_id() == 'gcc' if not cc.version().version_compare('>=8.0') error('nipy requires GCC >= 8.0') endif elif cc.get_id() == 'msvc' if not cc.version().version_compare('>=19.20') error('nipy requires at least vc142 (default with Visual Studio 2019) ' + \ 'when building with MSVC') endif endif _global_c_args = cc.get_supported_arguments( '-Wno-unused-function', ) add_project_arguments(_global_c_args, language: ['c']) # We need -lm for all C code (assuming it uses math functions, which is safe to # assume for nipy). 
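# Searching with required : false keeps the build working on toolchains such
# as MSVC, where the math functions live in the C runtime and there is no
# separate libm to link.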
m_dep = cc.find_library('m', required : false) if m_dep.found() add_project_link_arguments('-lm', language : 'c') endif cython = find_program('cython') # https://mesonbuild.com/Python-module.html py = import('python').find_installation(pure: false) py_dep = py.dependency() # Platform detection is_windows = host_machine.system() == 'windows' is_mingw = is_windows and cc.get_id() == 'gcc' cython_c_args = [] if is_windows # For mingw-w64, link statically against the UCRT. gcc_link_args = ['-lucrt', '-static'] if is_mingw add_project_link_arguments(gcc_link_args, language: ['c', 'cpp']) # Force gcc to float64 long doubles for compatibility with MSVC # builds, for C only. add_project_arguments('-mlong-double-64', language: 'c') # Make fprintf("%zd") work (see https://github.com/rgommers/scipy/issues/118) add_project_arguments('-D__USE_MINGW_ANSI_STDIO=1', language: ['c', 'cpp']) # Manual add of MS_WIN64 macro when not using MSVC. # https://bugs.python.org/issue28267 bitness = run_command( 'nipy/_build_utils/gcc_build_bitness.py', check: true ).stdout().strip() if bitness == '64' add_project_arguments('-DMS_WIN64', language: ['c', 'cpp']) endif # Silence warnings emitted by PyOS_snprintf for (%zd), see # https://github.com/rgommers/scipy/issues/118. # Use as c_args for extensions containing Cython code cython_c_args += ['-Wno-format-extra-args', '-Wno-format'] endif endif # When cross-compiling, the compiler needs access to NumPy # headers for the host platform (where the package will actually run). These # headers may be incompatible with any corresponding headers that might be # installed on the build system (where the compilation is performed). To make # sure that the compiler finds the right headers, paths can be configured in # the 'properties' section of a Meson cross file: # # [properties] # numpy-include-dir = '/path/to/host/numpy/includes' # # If a cross file is not provided or does not specify either of these # properties, fall back to running Python on the build system to query NumPy or # Pythran directly for the appropriate paths. This will detect appropriate # paths for native builds. (This might even work for certain build/host cross # combinations, but don't rely on that.) # # For more information about cross compilation in Meson, including a definition # of "build" and "host" in this context, refer to # # https://mesonbuild.com/Cross-compilation.html # NumPy include directory incdir_numpy = meson.get_external_property('numpy-include-dir', 'not-given') if incdir_numpy == 'not-given' # If not specified, try to query NumPy from the build python incdir_numpy = run_command(py, [ '-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())' ], check: true ).stdout().strip() endif inc_np = include_directories(incdir_numpy) # Deal with M_PI & friends; add `use_math_defines` to c_args # Cython doesn't always get this correctly itself # explicitly add the define as a compiler flag for Cython-generated code. if is_windows use_math_defines = ['-D_USE_MATH_DEFINES'] else use_math_defines = [] endif # Don't use the deprecated NumPy C API. Define this to a fixed version instead of # NPY_API_VERSION in order not to break compilation for released SciPy versions # when NumPy introduces a new deprecation. 
Use in a meson.build file:: # # py.extension_module('_name', # 'source_fname', # numpy_nodepr_api) # numpy_nodepr_api = '-DNPY_NO_DEPRECATED_API=NPY_1_9_API_VERSION' subdir('lib') subdir('nipy') nipy-0.6.1/nipy/000077500000000000000000000000001470056100100134465ustar00rootroot00000000000000nipy-0.6.1/nipy/COMMIT_INFO.txt000066400000000000000000000004171470056100100160140ustar00rootroot00000000000000# This is an ini file that may contain information about the code state [commit hash] # The line below may contain a valid hash if it has been substituted during 'git archive' archive_subst_hash=e256e7016c # This line may be modified by the install process install_hash= nipy-0.6.1/nipy/__init__.py000066400000000000000000000016761470056100100155710ustar00rootroot00000000000000""" Nipy Nipy is a library for neuroimaging analysis. """ import os # When updating here, also update meson.build file. # Form for development. # __version__ = "0.6.2.dev1" # Form for release. __version__ = "0.6.1" def _test_local_install(): """ Warn the user that running with nipy being imported locally is a bad idea. """ import os if os.getcwd() == os.sep.join( os.path.abspath(__file__).split(os.sep)[:-2]): import warnings warnings.warn('Running the tests from the install directory may ' 'trigger some failures') _test_local_install() # Add to top-level namespace from nipy.core.api import is_image from nipy.io.api import as_image, load_image, save_image # Set up package information function from .pkg_info import get_pkg_info as _get_pkg_info get_info = lambda : _get_pkg_info(os.path.dirname(__file__)) # Cleanup namespace del _test_local_install nipy-0.6.1/nipy/_build_utils/000077500000000000000000000000001470056100100161245ustar00rootroot00000000000000nipy-0.6.1/nipy/_build_utils/cythoner.py000077500000000000000000000012411470056100100203320ustar00rootroot00000000000000#!/usr/bin/env python3 """ Scipy variant of Cython command Cython, as applied to single pyx file. Expects two arguments, infile and outfile. Other options passed through to cython command line parser. """ import os import os.path as op import subprocess as sbp import sys def main(): in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3]) sbp.run( [ 'cython', '-3', '--fast-fail', '--output-file', out_fname, '--include-dir', os.getcwd(), ] + sys.argv[3:] + [in_fname], check=True, ) if __name__ == '__main__': main() nipy-0.6.1/nipy/_build_utils/gcc_build_bitness.py000077500000000000000000000010311470056100100221360ustar00rootroot00000000000000#!/usr/bin/env python3 """ Detect bitness (32 or 64) of Mingw-w64 gcc build target on Windows. """ import re from subprocess import run def main(): res = run(['gcc', '-v'], check=True, text=True, capture_output=True) target = re.search(r'^Target: (.*)$', res.stderr, flags=re.MULTILINE).groups()[0] if target.startswith('i686'): print('32') elif target.startswith('x86_64'): print('64') else: raise RuntimeError('Could not detect Mingw-w64 bitness') if __name__ == "__main__": main() nipy-0.6.1/nipy/algorithms/000077500000000000000000000000001470056100100156175ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/__init__.py000066400000000000000000000004571470056100100177360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Generic algorithms such as registration, statistics, simulation, etc. """ __docformat__ = 'restructuredtext' from . 
import diagnostics, fwhm, interpolation, kernel_smooth, statistics nipy-0.6.1/nipy/algorithms/clustering/000077500000000000000000000000001470056100100177765ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/clustering/__init__.py000066400000000000000000000004111470056100100221030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This sub-package contains functions for clustering. It might be removed in the future, and replaced by an optional dependence on scikit learn. """ nipy-0.6.1/nipy/algorithms/clustering/bgmm.py000066400000000000000000001075611470056100100213040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Bayesian Gaussian Mixture Model Classes: contains the basic fields and methods of Bayesian GMMs the high level functions are/should be binded in C The base class BGMM relies on an implementation that performs Gibbs sampling A derived class VBGMM uses Variational Bayes inference instead A third class is introduces to take advnatge of the old C-bindings, but it is limited to diagonal covariance models Author : Bertrand Thirion, 2008-2011 """ import math import numpy as np import numpy.random as nr from scipy.linalg import cholesky, eigvalsh, inv from scipy.special import gammaln from .gmm import GMM from .utils import kmeans ################################################################## # ancillary functions ############################################ ################################################################## def detsh(H): """ Routine for the computation of determinants of symmetric positive matrices Parameters ---------- H array of shape(n,n) the input matrix, assumed symmetric and positive Returns ------- dh: float, the determinant """ return np.prod(eigvalsh(H)) def dirichlet_eval(w, alpha): """ Evaluate the probability of a certain discrete draw w from the Dirichlet density with parameters alpha Parameters ---------- w: array of shape (n) alpha: array of shape (n) """ if np.shape(w) != np.shape(alpha): raise ValueError("incompatible dimensions") loge = np.sum((alpha-1) * np.log(w)) logb = np.sum(gammaln(alpha)) - gammaln(alpha.sum()) loge -= logb return np.exp(loge) def generate_normals(m, P): """ Generate a Gaussian sample with mean m and precision P Parameters ---------- m array of shape n: the mean vector P array of shape (n,n): the precision matrix Returns ------- ng : array of shape(n): a draw from the gaussian density """ icp = inv(cholesky(P)) ng = nr.randn(m.shape[0]) ng = np.dot(ng, icp) ng += m return ng def generate_Wishart(n, V): """ Generate a sample from Wishart density Parameters ---------- n: float, the number of degrees of freedom of the Wishart density V: array of shape (n,n) the scale matrix of the Wishart density Returns ------- W: array of shape (n,n) the draw from Wishart density """ icv = cholesky(V) p = V.shape[0] A = nr.randn(p, p) for i in range(p): A[i, i:] = 0 A[i, i] = np.sqrt(nr.chisquare(n - i)) R = np.dot(icv, A) W = np.dot(R, R.T) return W def wishart_eval(n, V, W, dV=None, dW=None, piV=None): """Evaluation of the probability of W under Wishart(n,V) Parameters ---------- n: float, the number of degrees of freedom (dofs) V: array of shape (n,n) the scale matrix of the Wishart density W: array of shape (n,n) the sample to be evaluated dV: float, optional, determinant of V dW: float, optional, determinant of W piV: array of shape (n,n), 
optional inverse of V Returns ------- (float) the density """ # check that shape(V)==shape(W) p = V.shape[0] if dV is None: dV = detsh(V) if dW is None: dW = detsh(W) if piV is None: piV = inv(V) ldW = math.log(dW) * (n - p - 1) / 2 ltr = - np.trace(np.dot(piV, W)) / 2 la = (n * p * math.log(2) + math.log(dV) * n) / 2 lg = math.log(math.pi) * p * (p - 1) / 4 lg += gammaln(np.arange(n - p + 1, n + 1).astype(np.float64) / 2).sum() lt = ldW + ltr - la - lg return math.exp(lt) def normal_eval(mu, P, x, dP=None): """ Probability of x under normal(mu, inv(P)) Parameters ---------- mu: array of shape (n), the mean parameter P: array of shape (n, n), the precision matrix x: array of shape (n), the data to be evaluated Returns ------- (float) the density """ dim = P.shape[0] if dP is None: dP = detsh(P) w0 = math.log(dP) - dim * math.log(2 * math.pi) w0 /= 2 dx = mu - x q = np.dot(np.dot(P, dx), dx) w = w0 - q / 2 like = math.exp(w) return like def generate_perm(k, nperm=100): """ returns an array of shape(nbperm, k) representing the permutations of k elements Parameters ---------- k, int the number of elements to be permuted nperm=100 the maximal number of permutations if gamma(k+1)>nperm: only nperm random draws are generated Returns ------- p: array of shape(nperm,k): each row is permutation of k """ from scipy.special import gamma if k == 1: return np.reshape(np.array([0]), (1, 1)).astype(np.int_) if gamma(k + 1) < nperm: # exhaustive permutations aux = generate_perm(k - 1) n = aux.shape[0] perm = np.zeros((n * k, k)).astype(np.int_) for i in range(k): perm[i * n:(i + 1) * n, :i] = aux[:, :i] perm[i * n:(i + 1) * n, i] = k-1 perm[i * n:(i + 1) * n, i + 1:] = aux[:, i:] else: from numpy.random import rand perm = np.zeros((nperm, k)).astype(np.int_) for i in range(nperm): p = np.argsort(rand(k)) perm[i] = p return perm def multinomial(probabilities): """ Generate samples form a miltivariate distribution Parameters ---------- probabilities: array of shape (nelements, nclasses): likelihood of each element belonging to each class each row is assumed to sum to 1 One sample is draw from each row, resulting in Returns ------- z array of shape (nelements): the draws, that take values in [0..nclasses-1] """ nvox = probabilities.shape[0] nclasses = probabilities.shape[1] cuml = np.zeros((nvox, nclasses + 1)) cuml[:, 1:] = np.cumsum(probabilities, 1) aux = np.random.rand(nvox, 1) z = np.argmax(aux < cuml, 1)-1 return z def dkl_gaussian(m1, P1, m2, P2): """ Returns the KL divergence between gausians densities Parameters ---------- m1: array of shape (n), the mean parameter of the first density P1: array of shape(n,n), the precision parameters of the first density m2: array of shape (n), the mean parameter of the second density P2: array of shape(n,n), the precision parameters of the second density """ tiny = 1.e-15 dim = np.size(m1) if m1.shape != m2.shape: raise ValueError("incompatible dimensions for m1 and m2") if P1.shape != P2.shape: raise ValueError("incompatible dimensions for P1 and P2") if P1.shape[0] != dim: raise ValueError("incompatible dimensions for m1 and P1") d1 = max(detsh(P1), tiny) d2 = max(detsh(P2), tiny) dkl = np.log(d1 / d2) + np.trace(np.dot(P2, inv(P1))) - dim dkl += np.dot(np.dot((m1 - m2).T, P2), (m1 - m2)) dkl /= 2 return dkl def dkl_wishart(a1, B1, a2, B2): """ returns the KL divergence bteween two Wishart distribution of parameters (a1,B1) and (a2,B2), Parameters ---------- a1: Float, degrees of freedom of the first density B1: array of shape(n,n), scale matrix of the first 
density a2: Float, degrees of freedom of the second density B2: array of shape(n,n), scale matrix of the second density Returns ------- dkl: float, the Kullback-Leibler divergence """ from scipy.special import gammaln, psi tiny = 1.e-15 if B1.shape != B2.shape: raise ValueError("incompatible dimensions for B1 and B2") dim = B1.shape[0] d1 = max(detsh(B1), tiny) d2 = max(detsh(B2), tiny) lgc = dim * (dim - 1) * math.log(np.pi) / 4 lg1 = lgc lg2 = lgc lw1 = - math.log(d1) + dim * math.log(2) lw2 = - math.log(d2) + dim * math.log(2) for i in range(dim): lg1 += gammaln((a1 - i) / 2) lg2 += gammaln((a2 - i) / 2) lw1 += psi((a1 - i) / 2) lw2 += psi((a2 - i) / 2) lz1 = 0.5 * a1 * dim * math.log(2) - 0.5 * a1 * math.log(d1) + lg1 lz2 = 0.5 * a2 * dim * math.log(2) - 0.5 * a2 * math.log(d2) + lg2 dkl = (a1 - dim - 1) * lw1 - (a2 - dim - 1) * lw2 - a1 * dim dkl += a1 * np.trace(np.dot(B2, inv(B1))) dkl /= 2 dkl += (lz2 - lz1) return dkl def dkl_dirichlet(w1, w2): """ Returns the KL divergence between two dirichlet distribution Parameters ---------- w1: array of shape(n), the parameters of the first dirichlet density w2: array of shape(n), the parameters of the second dirichlet density """ if w1.shape != w2.shape: raise ValueError("incompatible dimensions for w1 and w2") dkl = 0 from scipy.special import gammaln, psi dkl = np.sum(gammaln(w2)) - np.sum(gammaln(w1)) dkl += gammaln(np.sum(w1)) - gammaln(np.sum(w2)) dkl += np.sum((w1 - w2) * (psi(w1) - psi(np.sum(w1)))) return dkl ####################################################################### # main GMM class ##################################################### ####################################################################### class BGMM(GMM): """ This class implements Bayesian GMMs this class contains the following fields k: int, the number of components in the mixture dim: int, the dimension of the data means: array of shape (k, dim) all the means of the components precisions: array of shape (k, dim, dim) the precisions of the components weights: array of shape (k): weights of the mixture shrinkage: array of shape (k): scaling factor of the posterior precisions on the mean dof: array of shape (k) the degrees of freedom of the components prior_means: array of shape (k, dim): the prior on the components means prior_scale: array of shape (k, dim): the prior on the components precisions prior_dof: array of shape (k): the prior on the dof (should be at least equal to dim) prior_shrinkage: array of shape (k): scaling factor of the prior precisions on the mean prior_weights: array of shape (k) the prior on the components weights shrinkage: array of shape (k): scaling factor of the posterior precisions on the mean dof : array of shape (k): the posterior dofs fixme ----- only 'full' precision is supported """ def __init__(self, k=1, dim=1, means=None, precisions=None, weights=None, shrinkage=None, dof=None): """ Initialize the structure with the dimensions of the problem Eventually provide different terms """ GMM.__init__(self, k, dim, 'full', means, precisions, weights) self.shrinkage = shrinkage self.dof = dof if self.shrinkage is None: self.shrinkage = np.ones(self.k) if self.dof is None: self.dof = np.ones(self.k) if self.precisions is not None: self._detp = [detsh(self.precisions[k]) for k in range(self.k)] def check(self): """ Checking the shape of sifferent matrices involved in the model """ GMM.check(self) if self.prior_means.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_means") if self.prior_means.shape[1] != 
self.dim: raise ValueError("Incorrect dimension for self.prior_means") if self.prior_scale.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_scale") if self.prior_scale.shape[1] != self.dim: raise ValueError("Incorrect dimension for self.prior_scale") if self.prior_dof.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_dof") if self.prior_weights.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_weights") def set_priors(self, prior_means, prior_weights, prior_scale, prior_dof, prior_shrinkage): """ Set the prior of the BGMM Parameters ---------- prior_means: array of shape (self.k,self.dim) prior_weights: array of shape (self.k) prior_scale: array of shape (self.k,self.dim,self.dim) prior_dof: array of shape (self.k) prior_shrinkage: array of shape (self.k) """ self.prior_means = prior_means self.prior_weights = prior_weights self.prior_scale = prior_scale self.prior_dof = prior_dof self.prior_shrinkage = prior_shrinkage # cache some pre-computations self._dets = [detsh(self.prior_scale[k]) for k in range(self.k)] self._inv_prior_scale = np.array([inv(self.prior_scale[k]) for k in range(self.k)]) self.check() def guess_priors(self, x, nocheck=0): """ Set the priors in order of having them weakly uninformative this is from Fraley and raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x, array of shape (nb_samples,self.dim) the data used in the estimation process nocheck: boolean, optional, if nocheck==True, check is skipped """ # a few parameters small = 0.01 elshape = (1, self.dim, self.dim) mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.dot(dx.T, dx) / x.shape[0] px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape) px *= np.exp(2.0 / self.dim * math.log(self.k)) # set the priors self.prior_means = np.repeat(mx, self.k, 0) self.prior_weights = np.ones(self.k) self.prior_scale = np.repeat(px, self.k, 0) self.prior_dof = np.ones(self.k) * (self.dim + 2) self.prior_shrinkage = np.ones(self.k) * small # cache some pre-computations self._dets = np.ones(self.k) * detsh(px[0]) self._inv_prior_scale = np.repeat( np.reshape(inv(px[0]), elshape), self.k, 0) # check that everything is OK if nocheck == True: self.check() def initialize(self, x): """ initialize z using a k-means algorithm, then update the parameters Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process """ if self.k > 1: cent, z, J = kmeans(x, self.k) else: z = np.zeros(x.shape[0]).astype(np.int_) self.update(x, z) def pop(self, z): """ compute the population, i.e. 
the statistics of allocation Parameters ---------- z array of shape (nb_samples), type = np.int_ the allocation variable Returns ------- hist : array shape (self.k) count variable """ hist = np.array([np.sum(z == k) for k in range(self.k)]) return hist def update_weights(self, z): """ Given the allocation vector z, resample the weights parameter Parameters ---------- z array of shape (nb_samples), type = np.int_ the allocation variable """ pop = self.pop(z) weights = pop + self.prior_weights self.weights = np.random.dirichlet(weights) def update_means(self, x, z): """ Given the allocation vector z, and the corresponding data x, resample the mean Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process z: array of shape (nb_samples), type = np.int_ the corresponding classification """ pop = self.pop(z) self.shrinkage = self.prior_shrinkage + pop empmeans = np.zeros(np.shape(self.means)) prior_shrinkage = np.reshape(self.prior_shrinkage, (self.k, 1)) shrinkage = np.reshape(self.shrinkage, (self.k, 1)) for k in range(self.k): empmeans[k] = np.sum(x[z == k], 0) means = empmeans + self.prior_means * prior_shrinkage means /= shrinkage for k in range(self.k): self.means[k] = generate_normals(\ means[k], self.precisions[k] * self.shrinkage[k]) def update_precisions(self, x, z): """ Given the allocation vector z, and the corresponding data x, resample the precisions Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int_ the corresponding classification """ pop = self.pop(z) self.dof = self.prior_dof + pop + 1 rpop = pop + (pop == 0) self._detp = np.zeros(self.k) for k in range(self.k): # empirical means empmeans = np.sum(x[z == k], 0) / rpop[k] dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim)) # scatter dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim)) scatter = np.dot(dx.T, dx) # bias addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k] # covariance = prior term + scatter + bias covariance = self._inv_prior_scale[k] + scatter + addcov #precision scale = inv(covariance) self.precisions[k] = generate_Wishart(self.dof[k], scale) self._detp[k] = detsh(self.precisions[k]) def update(self, x, z): """ update function (draw a sample of the GMM parameters) Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int_ the corresponding classification """ self.update_weights(z) self.update_precisions(x, z) self.update_means(x, z) def sample_indicator(self, like): """ sample the indicator from the likelihood Parameters ---------- like: array of shape (nb_samples,self.k) component-wise likelihood Returns ------- z: array of shape(nb_samples): a draw of the membership variable """ tiny = 1 + 1.e-15 like = (like.T / like.sum(1)).T like /= tiny z = multinomial(like) return z def sample(self, x, niter=1, mem=0, verbose=0): """ sample the indicator and parameters Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process niter=1 : the number of iterations to perform mem=0: if mem, the best values of the parameters are computed verbose=0: verbosity mode Returns ------- best_weights: array of shape (self.k) best_means: array of shape (self.k, self.dim) best_precisions: array of shape (self.k, self.dim, self.dim) possibleZ: array of shape (nb_samples, niter) the z that give the highest posterior to the data is returned first """ 
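        # Illustrative use, as a minimal sketch (here `x` is any array of
        # shape (n_samples, dim); pass mem=1 so that the membership draws
        # are recorded and returned):
        #   model = BGMM(k=2, dim=x.shape[1])
        #   model.guess_priors(x)
        #   model.initialize(x)
        #   w, means, precs, z = model.sample(x, niter=100, mem=1)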
self.check_x(x) if mem: possibleZ = - np.ones((x.shape[0], niter)).astype(np.int_) score = - np.inf bpz = - np.inf for i in range(niter): like = self.likelihood(x) sll = np.mean(np.log(np.sum(like, 1))) sll += np.log(self.probability_under_prior()) if sll > score: score = sll best_weights = self.weights.copy() best_means = self.means.copy() best_precisions = self.precisions.copy() z = self.sample_indicator(like) if mem: possibleZ[:, i] = z puz = sll # to save time self.update(x, z) if puz > bpz: ibz = i bpz = puz if mem: aux = possibleZ[:, 0].copy() possibleZ[:, 0] = possibleZ[:, ibz].copy() possibleZ[:, ibz] = aux return best_weights, best_means, best_precisions, possibleZ def sample_and_average(self, x, niter=1, verbose=0): """ sample the indicator and parameters the average values for weights,means, precisions are returned Parameters ---------- x = array of shape (nb_samples,dim) the data from which bic is computed niter=1: number of iterations Returns ------- weights: array of shape (self.k) means: array of shape (self.k,self.dim) precisions: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) these are the average parameters across samplings Notes ----- All this makes sense only if no label switching as occurred so this is wrong in general (asymptotically). fix: implement a permutation procedure for components identification """ aprec = np.zeros(np.shape(self.precisions)) aweights = np.zeros(np.shape(self.weights)) ameans = np.zeros(np.shape(self.means)) for i in range(niter): like = self.likelihood(x) z = self.sample_indicator(like) self.update(x, z) aprec += self.precisions aweights += self.weights ameans += self.means aprec /= niter ameans /= niter aweights /= niter return aweights, ameans, aprec def probability_under_prior(self): """ Compute the probability of the current parameters of self given the priors """ p0 = 1 p0 = dirichlet_eval(self.weights, self.prior_weights) for k in range(self.k): mp = np.reshape(self.precisions[k] * self.prior_shrinkage[k], (self.dim, self.dim)) p0 *= normal_eval(self.prior_means[k], mp, self.means[k]) p0 *= wishart_eval(self.prior_dof[k], self.prior_scale[k], self.precisions[k], dV=self._dets[k], dW=self._detp[k], piV=self._inv_prior_scale[k]) return p0 def conditional_posterior_proba(self, x, z, perm=None): """ Compute the probability of the current parameters of self given x and z Parameters ---------- x: array of shape (nb_samples, dim), the data from which bic is computed z: array of shape (nb_samples), type = np.int_, the corresponding classification perm: array ok shape(nperm, self.k),typ=np.int_, optional all permutation of z under which things will be recomputed By default, no permutation is performed """ pop = self.pop(z) rpop = (pop + (pop == 0)).astype(np.float64) dof = self.prior_dof + pop + 1 shrinkage = self.prior_shrinkage + pop weights = pop + self.prior_weights # initialize the porsterior proba if perm is None: pp = dirichlet_eval(self.weights, weights) else: pp = np.array([dirichlet_eval(self.weights[pj], weights) for pj in perm]) for k in range(self.k): m1 = np.sum(x[z == k], 0) #0. Compute the empirical means empmeans = m1 / rpop[k] #1. the precisions dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim)) dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim)) addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k] covariance = self._inv_prior_scale[k] + np.dot(dx.T, dx) + addcov scale = inv(covariance) _dets = detsh(scale) #2. 
the means means = m1 + self.prior_means[k] * self.prior_shrinkage[k] means /= shrinkage[k] #3. update the posteriors if perm is None: pp *= wishart_eval( dof[k], scale, self.precisions[k], dV=_dets, dW=self._detp[k], piV=covariance) else: for j, pj in enumerate(perm): pp[j] *= wishart_eval( dof[k], scale, self.precisions[pj[k]], dV=_dets, dW=self._detp[pj[k]], piV=covariance) mp = scale * shrinkage[k] _dP = _dets * shrinkage[k] ** self.dim if perm is None: pp *= normal_eval(means, mp, self.means[k], dP=_dP) else: for j, pj in enumerate(perm): pp[j] *= normal_eval( means, mp, self.means[pj[k]], dP=_dP) return pp def evidence(self, x, z, nperm=0, verbose=0): """ See bayes_factor(self, x, z, nperm=0, verbose=0) """ return self.bayes_factor(x, z, nperm, verbose) def bayes_factor(self, x, z, nperm=0, verbose=0): """ Evaluate the Bayes Factor of the current model using Chib's method Parameters ---------- x: array of shape (nb_samples,dim) the data from which the Bayes factor is computed z: array of shape (nb_samples), type = np.int_ the corresponding classification nperm=0: int the number of permutations to sample to model the label switching issue in the computation of the Bayes Factor By default, exhaustive permutations are used verbose=0: verbosity mode Returns ------- bf (float) the computed evidence (Bayes factor) Notes ----- See: Siddhartha Chib, Marginal Likelihood from the Gibbs Output, Journal of the American Statistical Association, Vol. 90, 1995 """ niter = z.shape[1] p = [] perm = generate_perm(self.k) if nperm > perm.shape[0]: nperm = perm.shape[0] for i in range(niter): if nperm == 0: temp = self.conditional_posterior_proba(x, z[:, i], perm) p.append(temp.mean()) else: drand = np.argsort(np.random.rand(perm.shape[0]))[:nperm] temp = self.conditional_posterior_proba(x, z[:, i], perm[drand]) p.append(temp.mean()) p = np.array(p) mp = np.mean(p) p0 = self.probability_under_prior() like = self.likelihood(x) bf = np.log(p0) + np.sum(np.log(np.sum(like, 1))) - np.log(mp) if verbose: print(np.log(p0), np.sum(np.log(np.sum(like, 1))), np.log(mp)) return bf # --------------------------------------------------------- # --- Variational Bayes inference ------------------------- # --------------------------------------------------------- class VBGMM(BGMM): """ Subclass of Bayesian GMMs (BGMM) that implements Variational Bayes estimation of the parameters """ def __init__(self, k=1, dim=1, means=None, precisions=None, weights=None, shrinkage=None, dof=None): BGMM.__init__(self, k, dim, means, precisions, weights, shrinkage, dof) self.scale = self.precisions.copy() def _Estep(self, x): """VB-E step Parameters ---------- x array of shape (nb_samples,dim) the data used in the estimation process Returns ------- like: array of shape(nb_samples,self.k), component-wise likelihood """ n = x.shape[0] like = np.zeros((n, self.k)) from scipy.special import psi spsi = psi(np.sum(self.weights)) for k in range(self.k): # compute the data-independent factor first w0 = psi(self.weights[k]) - spsi w0 += 0.5 * np.log(detsh(self.scale[k])) w0 -= self.dim * 0.5 / self.shrinkage[k] w0 += 0.5 * np.log(2) * self.dim for i in range(self.dim): w0 += 0.5 * psi((self.dof[k] - i) / 2) m = np.reshape(self.means[k], (1, self.dim)) b = self.dof[k] * self.scale[k] q = np.sum(np.dot(m - x, b) * (m - x), 1) w = w0 - q / 2 w -= 0.5 * np.log(2 * np.pi) * self.dim like[:, k] = np.exp(w) if like.min() < 0: raise ValueError('Likelihood cannot be negative') return like def evidence(self, x, like=None, verbose=0):
"""computation of evidence bound aka free energy Parameters ---------- x array of shape (nb_samples,dim) the data from which evidence is computed like=None: array of shape (nb_samples, self.k), optional component-wise likelihood If None, it is recomputed verbose=0: verbosity model Returns ------- ev (float) the computed evidence """ from numpy.linalg import inv from scipy.special import psi tiny = 1.e-15 if like is None: like = self._Estep(x) like = (like.T / np.maximum(like.sum(1), tiny)).T pop = like.sum(0)[:self.k] pop = np.reshape(pop, (self.k, 1)) spsi = psi(np.sum(self.weights)) empmeans = np.dot(like.T[:self.k], x) / np.maximum(pop, tiny) F = 0 # start with the average likelihood term for k in range(self.k): # compute the data-independent factor first Lav = psi(self.weights[k]) - spsi Lav -= np.sum(like[:, k] * np.log(np.maximum(like[:, k], tiny))) \ / pop[k] Lav -= 0.5 * self.dim * np.log(2 * np.pi) Lav += 0.5 * np.log(detsh(self.scale[k])) Lav += 0.5 * np.log(2) * self.dim for i in range(self.dim): Lav += 0.5 * psi((self.dof[k] - i) / 2) Lav -= self.dim * 0.5 / self.shrinkage[k] Lav *= pop[k] empcov = np.zeros((self.dim, self.dim)) dx = x - empmeans[k] empcov = np.dot(dx.T, like[:, k:k + 1] * dx) Lav -= 0.5 * np.trace(np.dot(empcov, self.scale[k] * self.dof[k])) F += Lav #then the KL divergences prior_covariance = np.array(self._inv_prior_scale) covariance = np.array([inv(self.scale[k]) for k in range(self.k)]) Dklw = 0 Dklg = 0 Dkld = dkl_dirichlet(self.weights, self.prior_weights) for k in range(self.k): Dklw += dkl_wishart(self.dof[k], covariance[k], self.prior_dof[k], prior_covariance[k]) nc = self.scale[k] * (self.dof[k] * self.shrinkage[k]) nc0 = self.scale[k] * (self.dof[k] * self.prior_shrinkage[k]) Dklg += dkl_gaussian(self.means[k], nc, self.prior_means[k], nc0) Dkl = Dkld + Dklg + Dklw if verbose: print('Lav', F, 'Dkl', Dkld, Dklg, Dklw) F -= Dkl return F def _Mstep(self, x, like): """VB-M step Parameters ---------- x: array of shape(nb_samples, self.dim) the data from which the model is estimated like: array of shape(nb_samples, self.k) the likelihood of the data under each class """ from numpy.linalg import inv tiny = 1.e-15 pop = like.sum(0) # shrinkage, weights,dof self.weights = self.prior_weights + pop pop = pop[0:self.k] like = like[:, :self.k] self.shrinkage = self.prior_shrinkage + pop self.dof = self.prior_dof + pop #reshape pop = np.reshape(pop, (self.k, 1)) prior_shrinkage = np.reshape(self.prior_shrinkage, (self.k, 1)) shrinkage = np.reshape(self.shrinkage, (self.k, 1)) # means means = np.dot(like.T, x) + self.prior_means * prior_shrinkage self.means = means / shrinkage #precisions empmeans = np.dot(like.T, x) / np.maximum(pop, tiny) empcov = np.zeros(np.shape(self.prior_scale)) for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx) covariance = np.array(self._inv_prior_scale) + empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1, 1)) covariance += addcov * apms # update scale self.scale = np.array([inv(covariance[k]) for k in range(self.k)]) def initialize(self, x): """ initialize z using a k-means algorithm, then update the parameters Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process """ n = x.shape[0] if self.k > 1: cent, z, J = kmeans(x, self.k) else: z = np.zeros(x.shape[0]).astype(np.int_) l = np.zeros((n, 
self.k)) l[np.arange(n), z] = 1 self._Mstep(x, l) def map_label(self, x, like=None): """ return the MAP labelling of x Parameters ---------- x array of shape (nb_samples,dim) the data under study like=None array of shape(nb_samples,self.k) component-wise likelihood if like is None, it is recomputed Returns ------- z: array of shape(nb_samples): the resulting MAP labelling of the rows of x """ if like is None: like = self.likelihood(x) z = np.argmax(like, 1) return z def estimate(self, x, niter=100, delta=1.e-4, verbose=0): """estimation of self given x Parameters ---------- x array of shape (nb_samples,dim) the data from which the model is estimated niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared verbose=0: verbosity mode """ # alternation of E/M step until convergence tiny = 1.e-15 av_ll_old = - np.inf for i in range(niter): like = self._Estep(x) av_ll = np.mean(np.log(np.maximum(np.sum(like, 1), tiny))) if av_ll < av_ll_old + delta: if verbose: print('iteration:', i, 'log-likelihood:', av_ll, 'old value:', av_ll_old) break else: av_ll_old = av_ll if verbose: print(i, av_ll, self.bic(like)) like = (like.T / np.maximum(like.sum(1), tiny)).T self._Mstep(x, like) def likelihood(self, x): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x: array of shape (nb_samples, self.dim) the data used in the estimation process Returns ------- like: array of shape(nb_samples, self.k) component-wise likelihood """ x = self.check_x(x) return self._Estep(x) def pop(self, like, tiny=1.e-15): """ compute the population, i.e. the statistics of allocation Parameters ---------- like array of shape (nb_samples, self.k): the likelihood of each item being in each class """ slike = np.maximum(tiny, np.sum(like, 1)) nlike = (like.T / slike).T return np.sum(nlike, 0) nipy-0.6.1/nipy/algorithms/clustering/ggmixture.py000066400000000000000000000504321470056100100223670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ One-dimensional Gamma-Gaussian mixture density classes: Given a set of points, the algorithm provides approximate maximum likelihood estimates of the mixture distribution using an EM algorithm. Author: Bertrand Thirion and Merlin Keller 2005-2008 """ import numpy as np import scipy.special as sp import scipy.stats as st ############################################################################# # Auxiliary functions ####################################################### ############################################################################# def _dichopsi_log(u, v, y, eps=0.00001): """ Implements the dichotomic part of the solution of psi(c)-log(c)=y """ if u > v: u, v = v, u t = (u + v) / 2 if np.absolute(u - v) < eps: return t else: if sp.psi(t) - np.log(t) > y: return _dichopsi_log(u, t, y, eps) else: return _dichopsi_log(t, v, y, eps) def _psi_solve(y, eps=0.00001): """ Solve psi(c)-log(c)=y by dichotomy """ if y > 0: print("y", y) raise ValueError("y>0, the problem cannot be solved") u = 1.
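# Note on the bracketing below: f(u) = psi(u) - log(u) is increasing on
# (0, inf), going from -inf as u -> 0+ up to 0 as u -> inf (psi'(u) > 1/u
# for all u > 0).  The loops therefore double or halve u until y lies
# between f(u) and f(2 * u), and _dichopsi_log bisects that interval.
# An illustrative numerical check (a sketch, not executed here):
#
#     >>> import numpy as np
#     >>> from scipy.special import psi
#     >>> f = lambda u: psi(u) - np.log(u)
#     >>> bool(f(3.) < f(6.) < 0)
#     True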
if y > sp.psi(u) - np.log(u): while sp.psi(u) - np.log(u) < y: u *= 2 u /= 2 else: while sp.psi(u) - np.log(u) > y: u /= 2 return _dichopsi_log(u, 2 * u, y, eps) def _compute_c(x, z, eps=0.00001): """ this function returns the mle of the shape parameter if a 1D gamma density """ eps = 1.e-7 y = np.dot(z, np.log(x)) / np.sum(z) - np.log(np.dot(z, x) / np.sum(z)) if y > - eps: c = 10 else: c = _psi_solve(y, eps=0.00001) return c def _gaus_dens(mean, var, x): """ evaluate the gaussian density (mean,var) at points x """ Q = - (x - mean) ** 2 / (2 * var) return 1. / np.sqrt(2 * np.pi * var) * np.exp(Q) def _gam_dens(shape, scale, x): """evaluate the gamma density (shape,scale) at points x Notes ----- Returns 0 on negative subspace """ ng = np.zeros(np.size(x)) cst = - shape * np.log(scale) - sp.gammaln(shape) i = np.ravel(np.nonzero(x > 0)) if np.size(i) > 0: lz = cst + (shape - 1) * np.log(x[i]) - x[i] / scale ng[i] = np.exp(lz) return ng def _gam_param(x, z): """ Compute the parameters of a gamma density from data weighted points Parameters ---------- x: array of shape(nbitem) the learning points z: array of shape(nbitem), their membership within the class Notes ----- if no point is positive then the couple (1, 1) is returned """ eps = 1.e-5 i = np.ravel(np.nonzero(x > 0)) szi = np.sum(z[i]) if szi > 0: shape = _compute_c(x[i], z[i], eps) scale = np.dot(x[i], z[i]) / (szi * shape) else: shape = 1 scale = 1 return shape, scale ############################################################################## # class `Gamma` ############################################################################## class Gamma: """ Basic one dimensional Gaussian-Gamma Mixture estimation class Note that it can work with positive or negative values, as long as there is at least one positive value. NB : The gamma distribution is defined only on positive values. 5 parameters are used: - mean: gaussian mean - var: gaussian variance - shape: gamma shape - scale: gamma scale - mixt: mixture parameter (weight of the gamma) """ def __init__(self, shape=1, scale=1): self.shape = shape self.scale = scale def parameters(self): print("shape: ", self.shape, "scale: ", self.scale) def check(self, x): if (x.min() < 0): raise ValueError("negative values in input") def estimate(self, x, eps=1.e-7): """ ML estimation of the Gamma parameters """ self.check(x) n = np.size(x) y = np.sum(np.log(x)) / n - np.log(np.sum(x) / n) if y > - eps: self.shape = 1 else: self.shape = _psi_solve(y) self.scale = np.sum(x) / (n * self.shape) ############################################################################## # Gamma-Gaussian Mixture class ############################################################################## class GGM: """ This is the basic one dimensional Gaussian-Gamma Mixture estimation class Note that it can work with positive or negative values, as long as there is at least one positive value. NB : The gamma distribution is defined only on positive values. 
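A typical use, as an illustrative sketch (``x`` stands for any 1-D array
holding at least one positive value)::

    g = GGM()
    ll = g.estimate(x)                    # EM fit; mean final log-likelihood
    gaus_post, gam_post = g.posterior(x)  # per-component posteriors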
5 scalar members: - mean: gaussian mean - var: gaussian variance (non-negative) - shape: gamma shape (non-negative) - scale: gamma scale (non-negative) - mixt: mixture parameter (non-negative, weight of the gamma) """ def __init__(self, shape=1, scale=1, mean=0, var=1, mixt=0.5): self.shape = shape self.scale = scale self.mean = mean self.var = var self.mixt = mixt def parameters(self): """ print the parameters of self """ print("Gaussian: mean: ", self.mean, "variance: ", self.var) print("Gamma: shape: ", self.shape, "scale: ", self.scale) print("Mixture gamma: ", self.mixt, "Gaussian: ", 1 - self.mixt) def Mstep(self, x, z): """ Mstep of the model: maximum likelihood estimation of the parameters of the model Parameters ---------- x : array of shape (nbitems,) input data z : array of shape (nbitems, 2) the membership matrix """ # z[:, 0] is the membership weight of the gamma component # z[:, 1] is the membership weight of the gaussian component tiny = 1.e-15 sz = np.maximum(tiny, np.sum(z, 0)) self.shape, self.scale = _gam_param(x, z[:, 0]) self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] self.mixt = sz[0] / np.size(x) def Estep(self, x): """ E step of the estimation: estimation of data membership Parameters ---------- x: array of shape (nbitems,) input data Returns ------- z: array of shape (nbitems, 2) the membership matrix """ eps = 1.e-15 z = np.zeros((np.size(x), 2), 'd') z[:, 0] = _gam_dens(self.shape, self.scale, x) z[:, 1] = _gaus_dens(self.mean, self.var, x) z = z * np.array([self.mixt, 1. - self.mixt]) sz = np.maximum(np.sum(z, 1), eps) L = np.sum(np.log(sz)) / np.size(x) z = (z.T / sz).T return z, L def estimate(self, x, niter=10, delta=0.0001, verbose=False): """ Complete EM estimation procedure Parameters ---------- x : array of shape (nbitems,) the data to be processed niter : int, optional max nb of iterations delta : float, optional criterion for convergence verbose : bool, optional If True, print values during iterations Returns ------- LL, float average final log-likelihood """ if x.max() < 0: # all the values are generated by the Gaussian self.mean = np.mean(x) self.var = np.var(x) self.mixt = 0. L = 0.5 * (1 + np.log(2 * np.pi * self.var)) return L # proceed with standard estimate z, L = self.Estep(x) L0 = L - 2 * delta for i in range(niter): self.Mstep(x, z) z, L = self.Estep(x) if verbose: print(i, L) if (L < L0 + delta): break L0 = L return L def show(self, x): """ Visualization of the mixture model based on the empirical histogram of x Parameters ---------- x : array of shape (nbitems,) the data to be processed """ step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) h, c = np.histogram(x, bins) h = h.astype(np.float64) / np.size(x) p = self.mixt dc = c[1] - c[0] y = (1 - p) * _gaus_dens(self.mean, self.var, c) * dc z = np.zeros(np.size(c)) z = _gam_dens(self.shape, self.scale, c) * p * dc import matplotlib.pyplot as plt plt.figure() plt.plot(0.5 * (c[1:] + c[:-1]), h) plt.plot(c, y, 'r') plt.plot(c, z, 'g') plt.plot(c, z + y, 'k') plt.title('Fit of the density with a Gamma-Gaussian mixture') plt.legend(('data', 'gaussian component', 'gamma component', 'mixture distribution')) def posterior(self, x): """Posterior probability of observing the data x for each component Parameters ---------- x: array of shape (nbitems,) the data to be processed Returns ------- y, pg : arrays of shape (nbitems,) the posterior probability of each component """ p = self.mixt pg = p * _gam_dens(self.shape, self.scale, x) y = (1 - p) * _gaus_dens(self.mean, self.var, x) return y / (y + pg), pg / (y + pg) ############################################################################## # double-Gamma-Gaussian Mixture class ############################################################################## class GGGM: """ The basic one dimensional Gamma-Gaussian-Gamma Mixture estimation class, where the first gamma has a negative sign, while the second one has a positive sign. 7 parameters are used: - shape_n: negative gamma shape - scale_n: negative gamma scale - mean: gaussian mean - var: gaussian variance - shape_p: positive gamma shape - scale_p: positive gamma scale - mixt: array of mixture parameters (weights of the n-gamma, gaussian and p-gamma) """ def __init__(self, shape_n=1, scale_n=1, mean=0, var=1, shape_p=1, scale_p=1, mixt=np.array([1.0, 1.0, 1.0]) / 3): """ Constructor Parameters ----------- shape_n : float, optional scale_n: float, optional parameters of the negative gamma; must be positive mean : float, optional var : float, optional parameters of the gaussian; var must be positive shape_p : float, optional scale_p : float, optional parameters of the positive gamma; must be positive mixt : array of shape (3,), optional the mixing proportions; they should be positive and sum to 1 """ self.shape_n = shape_n self.scale_n = scale_n self.mean = mean self.var = var self.shape_p = shape_p self.scale_p = scale_p self.mixt = mixt def parameters(self): """ Print the parameters """ print("Negative Gamma: shape: ", self.shape_n, "scale: ", self.scale_n) print("Gaussian: mean: ", self.mean, "variance: ", self.var) print("Positive Gamma: shape: ", self.shape_p, "scale: ", self.scale_p) mixt = self.mixt print("Mixture neg. gamma: ", mixt[0], "Gaussian: ", mixt[1], "pos. gamma: ", mixt[2]) def init(self, x, mixt=None): """ initialization of the different parameters Parameters ---------- x: array of shape(nbitems) the data to be processed mixt : None or array of shape(3), optional prior mixing proportions.
If None, the classes have equal weight """ if mixt is not None: if np.size(mixt) == 3: self.mixt = np.ravel(mixt) else: raise ValueError('bad size for mixt') # gaussian self.mean = np.mean(x) self.var = np.var(x) # negative gamma i = np.ravel(np.nonzero(x < 0)) if np.size(i) > 0: mn = - np.mean(x[i]) vn = np.var(x[i]) self.scale_n = vn / mn self.shape_n = mn ** 2 / vn else: self.mixt[0] = 0 # positive gamma i = np.ravel(np.nonzero(x > 0)) if np.size(i) > 0: mp = np.mean(x[i]) vp = np.var(x[i]) self.scale_p = vp / mp self.shape_p = mp ** 2 / vp else: self.mixt[2] = 0 # mixing proportions self.mixt = self.mixt / np.sum(self.mixt) def init_fdr(self, x, dof=-1, copy=True): """ Initialization of the class based on a fdr heuristic: the probability to be in the positive component is proportional to the 'positive fdr' of the data. The same holds for the negative part. The point is that the gamma parts should model nothing more that the tails of the distribution. Parameters ---------- x: array of shape (nbitem) the data under consideration dof: integer, optional number of degrees of freedom if x is thought to be a Student variate. By default, it is handled as a normal copy: boolean, optional If True, copy the data. """ # Safeguard ourselves against modifications of x, both by our # code, and by external code. if copy: x = x.copy() # positive gamma i = np.ravel(np.nonzero(x > 0)) from ..statistics.empirical_pvalue import fdr if np.size(i) > 0: if dof < 0: pvals = st.norm.sf(x) else: pvals = st.t.sf(x, dof) q = fdr(pvals) z = 1 - q[i] self.mixt[2] = np.maximum(0.5, z.sum()) / np.size(x) self.shape_p, self.scale_p = _gam_param(x[i], z) else: self.mixt[2] = 0 # negative gamma i = np.ravel(np.nonzero(x < 0)) if np.size(i) > 0: if dof < 0: pvals = st.norm.cdf(x) else: pvals = st.t.cdf(x, dof) q = fdr(pvals) z = 1 - q[i] self.shape_n, self.scale_n = _gam_param( - x[i], z) self.mixt[0] = np.maximum(0.5, z.sum()) / np.size(x) else: self.mixt[0] = 0 self.mixt[1] = 1 - self.mixt[0] - self.mixt[2] def Mstep(self, x, z): """ Mstep of the estimation: Maximum likelihood update the parameters of the three components Parameters ------------ x: array of shape (nbitem,) input data z: array of shape (nbitems,3) probabilistic membership """ tiny = 1.e-15 sz = np.maximum(np.sum(z, 0), tiny) self.mixt = sz / np.sum(sz) # negative gamma self.shape_n, self.scale_n = _gam_param( - x, z[:, 0]) # gaussian self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] # positive gamma self.shape_p, self.scale_p = _gam_param(x, z[:, 2]) def Estep(self, x): """ Update probabilistic memberships of the three components Parameters ---------- x: array of shape (nbitems,) the input data Returns ------- z: ndarray of shape (nbitems, 3) probabilistic membership Notes ----- z[0,:] is the membership the negative gamma z[1,:] is the membership of the gaussian z[2,:] is the membership of the positive gamma """ tiny = 1.e-15 z = np.array(self.component_likelihood(x)).T * self.mixt sz = np.maximum(tiny, np.sum(z, 1)) L = np.mean(np.log(sz)) z = (z.T / sz).T return z, L def estimate(self, x, niter=100, delta=1.e-4, bias=0, verbose=0, gaussian_mix=0): """ Whole EM estimation procedure: Parameters ---------- x: array of shape (nbitem) input data niter: integer, optional max number of iterations delta: float, optional increment in LL at which convergence is declared bias: float, optional lower bound on the gaussian variance (to avoid shrinkage) gaussian_mix: float, optional if nonzero, lower bound on the gaussian 
mixing weight (to avoid shrinkage) verbose: 0, 1 or 2 verbosity level Returns ------- z: array of shape (nbitem, 3) the membership matrix """ z, L = self.Estep(x) L0 = L - 2 * delta for i in range(niter): self.Mstep(x, z) # Constraint the Gaussian variance if bias > 0: self.var = np.maximum(bias, self.var) # Constraint the Gaussian mixing ratio if gaussian_mix > 0 and self.mixt[1] < gaussian_mix: upper, gaussian, lower = self.mixt upper_to_lower = upper / (lower + upper) gaussian = gaussian_mix upper = (1 - gaussian_mix) * upper_to_lower lower = 1 - gaussian_mix - upper self.mixt = lower, gaussian, upper z, L = self.Estep(x) if verbose: print(i, L) if (L < L0 + delta): break L0 = L return z def posterior(self, x): """ Compute the posterior probability of the three components given the data Parameters ----------- x: array of shape (nbitem,) the data under evaluation Returns -------- ng,y,pg: three arrays of shape(nbitem) the posteriori of the 3 components given the data Notes ----- ng + y + pg = np.ones(nbitem) """ p = self.mixt ng, y, pg = self.component_likelihood(x) total = ng * p[0] + y * p[1] + pg * p[2] return ng * p[0] / total, y * p[1] / total, pg * p[2] / total def component_likelihood(self, x): """ Compute the likelihood of the data x under the three components negative gamma, gaussina, positive gaussian Parameters ----------- x: array of shape (nbitem,) the data under evaluation Returns -------- ng,y,pg: three arrays of shape(nbitem) The likelihood of the data under the 3 components """ ng = _gam_dens(self.shape_n, self.scale_n, - x) y = _gaus_dens(self.mean, self.var, x) pg = _gam_dens(self.shape_p, self.scale_p, x) return ng, y, pg def show(self, x, mpaxes=None): """ Visualization of mixture shown on the empirical histogram of x Parameters ---------- x: ndarray of shape (nditem,) data mpaxes: matplotlib axes, optional axes handle used for the plot if None, new axes are created. """ import matplotlib.pyplot as plt step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) h, c = np.histogram(x, bins) h = h.astype(np.float64) / np.size(x) dc = c[1] - c[0] ng = self.mixt[0] * _gam_dens(self.shape_n, self.scale_n, - c) y = self.mixt[1] * _gaus_dens(self.mean, self.var, c) pg = self.mixt[2] * _gam_dens(self.shape_p, self.scale_p, c) z = y + pg + ng if mpaxes is None: plt.figure() ax = plt.subplot(1, 1, 1) else: ax = mpaxes ax.plot(0.5 * (c[1:] + c[:-1]), h / dc, linewidth=2, label='data') ax.plot(c, ng, 'c', linewidth=2, label='negative gamma component') ax.plot(c, y, 'r', linewidth=2, label='Gaussian component') ax.plot(c, pg, 'g', linewidth=2, label='positive gamma component') ax.plot(c, z, 'k', linewidth=2, label='mixture distribution') ax.set_title('Fit of the density with a Gamma-Gaussian mixture', fontsize=12) l = ax.legend() for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) nipy-0.6.1/nipy/algorithms/clustering/gmm.py000066400000000000000000000711531470056100100211370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Gaussian Mixture Model Class: contains the basic fields and methods of GMMs The class GMM _old uses C bindings which are computationally and memory efficient. 
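A minimal usage sketch (illustrative only; the data array holds one
sample per row)::

    import numpy as np
    x = np.random.randn(100, 2)
    gmm = GMM(k=2, dim=2, prec_type='full')
    best = gmm.initialize_and_estimate(x)  # k-means init + EM; best model
    labels = best.map_label(x)             # MAP component assignment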
Author : Bertrand Thirion, 2006-2009 """ import numpy as np from scipy.linalg import eigvalsh class GridDescriptor: """ A tiny class to handle cartesian grids """ def __init__(self, dim=1, lim=None, n_bins=None): """ Parameters ---------- dim: int, optional, the dimension of the grid lim: list of len(2*self.dim), the limits of the grid as (xmin, xmax, ymin, ymax, ...) n_bins: list of len(self.dim), the number of bins in each direction """ self.dim = dim if lim is not None: self.set(lim, n_bins) if np.size(n_bins) == self.dim: self.n_bins = np.ravel(np.array(n_bins)) def set(self, lim, n_bins=10): """ set the limits of the grid and the number of bins Parameters ---------- lim: list of len(2*self.dim), the limits of the grid as (xmin, xmax, ymin, ymax, ...) n_bins: list of len(self.dim), optional the number of bins in each direction """ if len(lim) == 2 * self.dim: self.lim = lim else: raise ValueError("Wrong dimension for grid definition") if np.size(n_bins) == self.dim: self.n_bins = np.ravel(np.array(n_bins)) else: raise ValueError("Wrong dimension for grid definition") def make_grid(self): """ Compute the grid points Returns ------- grid: array of shape (nb_nodes, self.dim) where nb_nodes is the prod of self.n_bins """ size = np.prod(self.n_bins) grid = np.zeros((size, self.dim)) grange = [] for j in range(self.dim): xm = self.lim[2 * j] xM = self.lim[2 * j + 1] if np.isscalar(self.n_bins): xb = self.n_bins else: xb = self.n_bins[j] gr = xm + float(xM - xm) / (xb - 1) * np.arange(xb).astype('f') grange.append(gr) if self.dim == 1: grid = np.array([[grange[0][i]] for i in range(xb)]) if self.dim == 2: for i in range(self.n_bins[0]): for j in range(self.n_bins[1]): grid[i * self.n_bins[1] + j] = np.array( [grange[0][i], grange[1][j]]) if self.dim == 3: for i in range(self.n_bins[0]): for j in range(self.n_bins[1]): for k in range(self.n_bins[2]): q = (i * self.n_bins[1] + j) * self.n_bins[2] + k grid[q] = np.array([grange[0][i], grange[1][j], grange[2][k]]) if self.dim > 3: raise NotImplementedError( 'only dimensions <4 are currently handled') return grid def best_fitting_GMM(x, krange, prec_type='full', niter=100, delta=1.e-4, ninit=1, verbose=0): """ Given a certain dataset x, find the best-fitting GMM with a number k of classes in a certain range defined by krange Parameters ---------- x: array of shape (n_samples,dim) the data from which the model is estimated krange: list of floats, the range of values to test for k prec_type: string (to be chosen within 'full','diag'), optional, the covariance parameterization niter: int, optional, maximal number of iterations in the estimation process delta: float, optional, increment of data likelihood at which convergence is declared ninit: int number of initialization performed verbose=0: verbosity mode Returns ------- mg : the best-fitting GMM instance """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) dim = x.shape[1] bestbic = - np.inf for k in krange: lgmm = GMM(k, dim, prec_type) gmmk = lgmm.initialize_and_estimate(x, None, niter, delta, ninit, verbose) bic = gmmk.evidence(x) if bic > bestbic: bestbic = bic bgmm = gmmk if verbose: print('k', k, 'bic', bic) return bgmm def plot2D(x, my_gmm, z=None, with_dots=True, log_scale=False, mpaxes=None, verbose=0): """ Given a set of points in a plane and a GMM, plot them Parameters ---------- x: array of shape (npoints, dim=2), sample points my_gmm: GMM instance, whose density has to be plotted z: array of shape (npoints), optional that gives a labelling of the points in x by default, 
it is not taken into account with_dots, bool, optional whether to plot the dots or not log_scale: bool, optional whether to plot the likelihood in log scale or not mpaxes: axes handle, optional if not None, the axes handle used for plotting verbose: verbosity mode, optional Returns ------- gd, GridDescriptor instance, that represents the grid used in the function ax, handle to the figure axes Notes ----- ``my_gmm`` is assumed to have a 'mixture_likelihood' method that takes an array of points of shape (np, dim) and returns an array of shape (np,my_gmm.k) that represents the likelihood component-wise """ import matplotlib.pyplot as plt if x.shape[1] != my_gmm.dim: raise ValueError('Incompatible dimension between data and model') if x.shape[1] != 2: raise ValueError('this works only for 2D cases') gd1 = GridDescriptor(2) xmin, xmax = x.min(0), x.max(0) xm = 1.1 * xmin[0] - 0.1 * xmax[0] xs = 1.1 * xmax[0] - 0.1 * xmin[0] ym = 1.1 * xmin[1] - 0.1 * xmax[1] ys = 1.1 * xmax[1] - 0.1 * xmin[1] gd1.set([xm, xs, ym, ys], [51, 51]) grid = gd1.make_grid() L = my_gmm.mixture_likelihood(grid) if verbose: intl = L.sum() * (xs - xm) * (ys - ym) / 2500 print('integral of the density on the domain ', intl) if mpaxes is None: plt.figure() ax = plt.subplot(1, 1, 1) else: ax = mpaxes gdx = gd1.n_bins[0] Pdens = np.reshape(L, (gdx, -1)) extent = [xm, xs, ym, ys] if log_scale: plt.imshow(np.log(Pdens.T), origin='lower', extent=extent) else: plt.imshow(Pdens.T, origin='lower', extent=extent) if with_dots: if z is None: plt.plot(x[:, 0], x[:, 1], 'o') else: hsv = plt.cm.hsv(list(range(256))) col = hsv[::(256 // int(z.max() + 1))] for k in range(z.max() + 1): plt.plot(x[z == k, 0], x[z == k, 1], 'o', color=col[k]) plt.axis(extent) plt.colorbar() return gd1, ax class GMM: """Standard GMM. This class contains the following members k (int): the number of components in the mixture dim (int): the dimension of the data prec_type = 'full' (string) is the parameterization of the precisions/covariance matrices: either 'full' or 'diag'. means: array of shape (k,dim): all the means (mean parameters) of the components precisions: array of shape (k,dim,dim): the precisions (inverse covariance matrix) of the components weights: array of shape(k): weights of the mixture fixme ----- no copy method """ def __init__(self, k=1, dim=1, prec_type='full', means=None, precisions=None, weights=None): """ Initialize the structure, at least with the dimensions of the problem Parameters ---------- k (int) the number of classes of the model dim (int) the dimension of the problem prec_type = 'full' : covariance/precision parameterization (diagonal 'diag' or full 'full').
means = None: array of shape (self.k,self.dim) precisions = None: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) weights=None: array of shape (self.k) By default, means, precision and weights are set as zeros() eye() 1/k ones() with the correct dimensions """ self.k = k self.dim = dim self.prec_type = prec_type self.means = means self.precisions = precisions self.weights = weights if self.means is None: self.means = np.zeros((self.k, self.dim)) if self.precisions is None: if prec_type == 'full': prec = np.reshape(np.eye(self.dim), (1, self.dim, self.dim)) self.precisions = np.repeat(prec, self.k, 0) else: self.precisions = np.ones((self.k, self.dim)) if self.weights is None: self.weights = np.ones(self.k) * 1.0 / self.k def plugin(self, means, precisions, weights): """ Set manually the weights, means and precision of the model Parameters ---------- means: array of shape (self.k,self.dim) precisions: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) weights: array of shape (self.k) """ self.means = means self.precisions = precisions self.weights = weights self.check() def check(self): """ Checking the shape of different matrices involved in the model """ if self.means.shape[0] != self.k: raise ValueError("self.means does not have correct dimensions") if self.means.shape[1] != self.dim: raise ValueError("self.means does not have correct dimensions") if self.weights.size != self.k: raise ValueError("self.weights does not have correct dimensions") if self.dim != self.precisions.shape[1]: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type == 'full': if self.dim != self.precisions.shape[2]: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type == 'diag': if np.shape(self.precisions) != np.shape(self.means): raise ValueError( "self.precisions does not have correct dimensions") if self.precisions.shape[0] != self.k: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type not in ['full', 'diag']: raise ValueError('unknown precisions type') def check_x(self, x): """ essentially check that x.shape[1]==self.dim x is returned with possibly reshaping """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) if x.shape[1] != self.dim: raise ValueError('incorrect size for x') return x def initialize(self, x): """Initializes self according to a certain dataset x: 1. sets the regularizing hyper-parameters 2. initializes z using a k-means algorithm, then 3. update the parameters Parameters ---------- x, array of shape (n_samples,self.dim) the data used in the estimation process """ from .utils import kmeans n = x.shape[0] #1. set the priors self.guess_regularizing(x, bcheck=1) # 2. initialize the memberships if self.k > 1: _, z, _ = kmeans(x, self.k) else: z = np.zeros(n).astype(np.int_) l = np.zeros((n, self.k)) l[np.arange(n), z] = 1 # 3.update the parameters self.update(x, l) def pop(self, like, tiny=1.e-15): """compute the population, i.e. 
the statistics of allocation Parameters ---------- like: array of shape (n_samples,self.k): the likelihood of each item being in each class """ sl = np.maximum(tiny, np.sum(like, 1)) nl = (like.T / sl).T return np.sum(nl, 0) def update(self, x, l): """ Identical to self._Mstep(x,l) """ self._Mstep(x, l) def likelihood(self, x): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) component-wise likelihood """ like = self.unweighted_likelihood(x) like *= self.weights return like def unweighted_likelihood_(self, x): """ return the likelihood of each data for each component the values are not weighted by the component weights Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) unweighted component-wise likelihood """ n = x.shape[0] like = np.zeros((n, self.k)) for k in range(self.k): # compute the data-independent factor first w = - np.log(2 * np.pi) * self.dim m = np.reshape(self.means[k], (1, self.dim)) b = self.precisions[k] if self.prec_type == 'full': w += np.log(eigvalsh(b)).sum() dx = m - x q = np.sum(np.dot(dx, b) * dx, 1) else: w += np.sum(np.log(b)) q = np.dot((m - x) ** 2, b) w -= q w /= 2 like[:, k] = np.exp(w) return like def unweighted_likelihood(self, x): """ return the likelihood of each data for each component the values are not weighted by the component weights Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) unweighted component-wise likelihood Notes ----- Hopefully faster """ xt = x.T.copy() n = x.shape[0] like = np.zeros((n, self.k)) for k in range(self.k): # compute the data-independent factor first w = - np.log(2 * np.pi) * self.dim m = np.reshape(self.means[k], (self.dim, 1)) b = self.precisions[k] if self.prec_type == 'full': w += np.log(eigvalsh(b)).sum() dx = xt - m sqx = dx * np.dot(b, dx) q = np.zeros(n) for d in range(self.dim): q += sqx[d] else: w += np.sum(np.log(b)) q = np.dot(b, (m - xt) ** 2) w -= q w /= 2 like[:, k] = np.exp(w) return like def mixture_likelihood(self, x): """Returns the likelihood of the mixture for x Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process """ x = self.check_x(x) like = self.likelihood(x) sl = np.sum(like, 1) return sl def average_log_like(self, x, tiny=1.e-15): """returns the averaged log-likelihood of the mode for the dataset x Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process tiny = 1.e-15: a small constant to avoid numerical singularities """ x = self.check_x(x) like = self.likelihood(x) sl = np.sum(like, 1) sl = np.maximum(sl, tiny) return np.mean(np.log(sl)) def evidence(self, x): """Computation of bic approximation of evidence Parameters ---------- x array of shape (n_samples,dim) the data from which bic is computed Returns ------- the bic value """ x = self.check_x(x) tiny = 1.e-15 like = self.likelihood(x) return self.bic(like, tiny) def bic(self, like, tiny=1.e-15): """Computation of bic approximation of evidence Parameters ---------- like, array of shape (n_samples, self.k) component-wise likelihood tiny=1.e-15, a small constant to avoid numerical singularities Returns ------- the bic value, float """ sl = 
np.sum(like, 1) sl = np.maximum(sl, tiny) bicc = np.sum(np.log(sl)) # number of parameters n = like.shape[0] if self.prec_type == 'full': eta = self.k * (1 + self.dim + (self.dim * self.dim + 1) / 2) - 1 else: eta = self.k * (1 + 2 * self.dim) - 1 bicc = bicc - np.log(n) * eta return bicc def _Estep(self, x): """ E step of the EM algo returns the likelihood per class of each data item Parameters ---------- x array of shape (n_samples,dim) the data used in the estimation process Returns ------- likelihood array of shape(n_samples,self.k) component-wise likelihood """ return self.likelihood(x) def guess_regularizing(self, x, bcheck=1): """ Set the regularizing priors as weakly informative according to Fraley and raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x array of shape (n_samples,dim) the data used in the estimation process """ small = 0.01 # the mean of the data mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.dot(dx.T, dx) / x.shape[0] if self.prec_type == 'full': px = np.reshape(np.diag(1.0 / np.diag(vx)), (1, self.dim, self.dim)) else: px = np.reshape(1.0 / np.diag(vx), (1, self.dim)) px *= np.exp(2.0 / self.dim * np.log(self.k)) self.prior_means = np.repeat(mx, self.k, 0) self.prior_weights = np.ones(self.k) / self.k self.prior_scale = np.repeat(px, self.k, 0) self.prior_dof = self.dim + 2 self.prior_shrinkage = small self.weights = np.ones(self.k) * 1.0 / self.k if bcheck: self.check() def _Mstep(self, x, like): """ M step regularized according to the procedure of Fraley et al. 2007 Parameters ---------- x: array of shape(n_samples,self.dim) the data from which the model is estimated like: array of shape(n_samples,self.k) the likelihood of the data under each class """ from numpy.linalg import pinv tiny = 1.e-15 pop = self.pop(like) sl = np.maximum(tiny, np.sum(like, 1)) like = (like.T / sl).T # shrinkage,weights,dof self.weights = self.prior_weights + pop self.weights = self.weights / self.weights.sum() # reshape pop = np.reshape(pop, (self.k, 1)) prior_shrinkage = self.prior_shrinkage shrinkage = pop + prior_shrinkage # means means = np.dot(like.T, x) + self.prior_means * prior_shrinkage self.means = means / shrinkage #precisions empmeans = np.dot(like.T, x) / np.maximum(pop, tiny) empcov = np.zeros(np.shape(self.precisions)) if self.prec_type == 'full': for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx) #covariance covariance = np.array([pinv(self.prior_scale[k]) for k in range(self.k)]) covariance += empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1, 1)) covariance += (addcov * apms) dof = self.prior_dof + pop + self.dim + 2 covariance /= np.reshape(dof, (self.k, 1, 1)) # precision self.precisions = np.array([pinv(covariance[k]) \ for k in range(self.k)]) else: for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.sum(dx ** 2 * like[:, k:k + 1], 0) # covariance covariance = np.array([1.0 / self.prior_scale[k] for k in range(self.k)]) covariance += empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.sum(dx[k] ** 2, 0) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1)) covariance += addcov * apms dof = self.prior_dof + pop + self.dim + 2 covariance /= np.reshape(dof, (self.k, 1)) # precision self.precisions = np.array([1.0 / covariance[k] \ for k in 
range(self.k)]) def map_label(self, x, like=None): """return the MAP labelling of x Parameters ---------- x array of shape (n_samples,dim) the data under study like=None array of shape(n_samples,self.k) component-wise likelihood if like==None, it is recomputed Returns ------- z: array of shape(n_samples): the resulting MAP labelling of the rows of x """ if like is None: like = self.likelihood(x) z = np.argmax(like, 1) return z def estimate(self, x, niter=100, delta=1.e-4, verbose=0): """ Estimation of the model given a dataset x Parameters ---------- x array of shape (n_samples,dim) the data from which the model is estimated niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared verbose=0: verbosity mode Returns ------- bic : an asymptotic approximation of model evidence """ # check that the data is OK x = self.check_x(x) # alternation of E/M step until convergence tiny = 1.e-15 av_ll_old = - np.inf for i in range(niter): l = self._Estep(x) av_ll = np.mean(np.log(np.maximum(np.sum(l, 1), tiny))) if av_ll < av_ll_old + delta: if verbose: print('iteration:', i, 'log-likelihood:', av_ll, 'old value:', av_ll_old) break else: av_ll_old = av_ll if verbose: print(i, av_ll, self.bic(l)) self._Mstep(x, l) return self.bic(l) def initialize_and_estimate(self, x, z=None, niter=100, delta=1.e-4,\ ninit=1, verbose=0): """Estimation of self given x Parameters ---------- x array of shape (n_samples,dim) the data from which the model is estimated z = None: array of shape (n_samples) a prior labelling of the data to initialize the computation niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared ninit=1: number of initialization performed to reach a good solution verbose=0: verbosity mode Returns ------- the best model is returned """ bestbic = - np.inf bestgmm = GMM(self.k, self.dim, self.prec_type) bestgmm.initialize(x) for i in range(ninit): # initialization -> Kmeans self.initialize(x) # alternation of E/M step until convergence bic = self.estimate(x, niter=niter, delta=delta, verbose=0) if bic > bestbic: bestbic = bic bestgmm.plugin(self.means, self.precisions, self.weights) return bestgmm def train(self, x, z=None, niter=100, delta=1.e-4, ninit=1, verbose=0): """Idem initialize_and_estimate """ return self.initialize_and_estimate(x, z, niter, delta, ninit, verbose) def test(self, x, tiny=1.e-15): """Returns the log-likelihood of the mixture for x Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- ll: array of shape(n_samples) the log-likelihood of the rows of x """ return np.log(np.maximum(self.mixture_likelihood(x), tiny)) def show_components(self, x, gd, density=None, mpaxes=None): """Function to plot a GMM -- Currently, works only in 1D Parameters ---------- x: array of shape(n_samples, dim) the data under study gd: GridDescriptor instance density: array os shape(prod(gd.n_bins)) density of the model one the discrete grid implied by gd by default, this is recomputed mpaxes: axes handle to make the figure, optional, if None, a new figure is created """ import matplotlib.pyplot as plt if density is None: density = self.mixture_likelihood(gd.make_grid()) if gd.dim > 1: raise NotImplementedError("only implemented in 1D") step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) xmin = 1.1 * x.min() - 0.1 * x.max() xmax 
= 1.1 * x.max() - 0.1 * x.min() h, c = np.histogram(x, bins, [xmin, xmax], density=True) # Make code robust to new and old behavior of np.histogram c = c[:len(h)] offset = (xmax - xmin) / (2 * bins) c += offset / 2 grid = gd.make_grid() if mpaxes is None: plt.figure() ax = plt.axes() else: ax = mpaxes ax.plot(c + offset, h, linewidth=2) for k in range(self.k): ax.plot(grid, density[:, k], linewidth=2) ax.set_title('Fit of the density with a mixture of Gaussians', fontsize=12) legend = ['data'] legend.extend(f'component {k}' for k in range(1, self.k + 1)) l = ax.legend(tuple(legend)) for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) def show(self, x, gd, density=None, axes=None): """ Function to plot a GMM, still in progress Currently, works only in 1D and 2D Parameters ---------- x: array of shape(n_samples, dim) the data under study gd: GridDescriptor instance density: array os shape(prod(gd.n_bins)) density of the model one the discrete grid implied by gd by default, this is recomputed """ import matplotlib.pyplot as plt # recompute the density if necessary if density is None: density = self.mixture_likelihood(gd, x) if axes is None: axes = plt.figure() if gd.dim == 1: from ..statistics.empirical_pvalue import smoothed_histogram_from_samples h, c = smoothed_histogram_from_samples(x, normalized=True) offset = (c.max() - c.min()) / (2 * c.size) grid = gd.make_grid() h /= h.sum() h /= (2 * offset) plt.plot(c[: -1] + offset, h) plt.plot(grid, density) if gd.dim == 2: plt.figure() xm, xM, ym, yM = gd.lim[0:3] gd0 = gd.n_bins[0] Pdens = np.reshape(density, (gd0, np.size(density) / gd0)) axes.imshow(Pdens.T, None, None, None, 'nearest', 1.0, None, None, 'lower', [xm, xM, ym, yM]) axes.plot(x[:, 0], x[:, 1], '.k') axes.axis([xm, xM, ym, yM]) return axes nipy-0.6.1/nipy/algorithms/clustering/hierarchical_clustering.py000066400000000000000000000723371470056100100252410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ These routines perform some hierrachical agglomerative clustering of some input data. The following alternatives are proposed: - Distance based average-link - Similarity-based average-link - Distance based maximum-link - Ward's algorithm under graph constraints - Ward's algorithm without graph constraints In this latest version, the results are returned in a 'WeightedForest' structure, which gives access to the clustering hierarchy, facilitates the plot of the result etc. For back-compatibility, *_segment versions of the algorithms have been appended, with the old API (except the qmax parameter, which now represents the number of wanted clusters) Author : Bertrand Thirion,Pamela Guevara, 2006-2009 """ #--------------------------------------------------------------------------- # ------ Routines for Agglomerative Hierarchical Clustering ---------------- # -------------------------------------------------------------------------- from warnings import warn import numpy as np from ..graph.forest import Forest from ..graph.graph import WeightedGraph class WeightedForest(Forest): """ This is a weighted Forest structure, i.e. 
a tree - each node has one parent and children (hierarchical structure) - some of the nodes can be viewed as leaves, others as roots - the edges within a tree are associated with a weight: +1 from child to parent -1 from parent to child - additionally, the nodes have a value, which is called 'height', especially useful for dendrograms members ------- V : (int, >0) the number of vertices E : (int) the number of edges parents: array of shape (self.V) the parent array edges: array of shape (self.E,2) representing pairwise neighbors weights, array of shape (self.E), +1/-1 for ascending/descending links children: list of arrays that represents the children of any node height: array of shape(self.V) """ def __init__(self, V, parents=None, height=None): """ Parameters ---------- V: the number of vertices of the graph parents=None: array of shape (V) the parents of the graph by default, the parents are set to range(V), i.e. each node is its own parent, and each node is a tree height=None: array of shape(V) the height of the nodes """ V = int(V) if V < 1: raise ValueError('cannot create graphs with no vertex') self.V = int(V) # define the parents if parents is None: self.parents = np.arange(self.V) else: if np.size(parents) != V: raise ValueError('Incorrect size for parents') if parents.max() > self.V: raise ValueError('Incorrect value for parents') self.parents = np.reshape(parents, self.V) self.define_graph_attributes() if self.check() == 0: raise ValueError('The proposed structure is not a forest') self.children = [] if height is None: height = np.zeros(self.V) else: if np.size(height) != V: raise ValueError('Incorrect size for height') self.height = np.reshape(height, self.V) def set_height(self, height=None): """Set the height array """ if height is None: height = np.zeros(self.V) if np.size(height) != self.V: raise ValueError('Incorrect size for height') self.height = np.reshape(height, self.V) def get_height(self): """Get the height array """ return self.height def check_compatible_height(self): """Check that height[parents[i]]>=height[i] for all nodes """ OK = True for i in range(self.V): if self.height[self.parents[i]] < self.height[i]: OK = False return OK def plot(self, ax=None): """Plot the dendrogram associated with self the rank of the data in the dendrogram is returned Parameters ---------- ax: axis handle, optional Returns ------- ax, the axis handle """ import matplotlib.pyplot as plt if self.check_compatible_height() == False: raise ValueError('cannot plot myself in my current state') n = np.sum(self.isleaf()) # 1. find a permutation of the leaves that makes it nice aux = _label(self.parents) temp = np.zeros(self.V) rank = np.arange(self.V) temp[:n] = np.argsort(aux[:n]) for i in range(n): rank[int(temp[i])] = i # 2. derive the abscissa in the dendrogram idx = np.zeros(self.V) temp = np.argsort(rank[:n]) for i in range(n): idx[temp[i]] = i for i in range(n, self.V): j = np.nonzero(self.parents == i)[0] idx[i] = np.mean(idx[j]) # 3.
plot if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) for i in range(self.V): h1 = self.height[i] h2 = self.height[self.parents[i]] plt.plot([idx[i], idx[i]], [h1, h2], 'k') ch = self.get_children() for i in range(self.V): if np.size(ch[i]) > 0: lidx = idx[ch[i]] m = lidx.min() M = lidx.max() h = self.height[i] plt.plot([m, M], [h, h], 'k') cM = 1.05 * self.height.max() - 0.05 * self.height.min() cm = 1.05 * self.height.min() - 0.05 * self.height.max() plt.axis([-1, idx.max() + 1, cm, cM]) return ax def partition(self, threshold): """ Partition the tree according to a cut criterion """ valid = self.height < threshold f = self.subforest(valid) u = f.cc() return u[f.isleaf()] def split(self, k): """ idem as partition, but a number of components are supplied instead """ k = int(k) if k > self.V: k = self.V nbcc = self.cc().max() + 1 if k <= nbcc: u = self.cc() return u[self.isleaf()] sh = np.sort(self.height) th = sh[nbcc - k] u = self.partition(th) return u def plot_height(self): """Plot the height of the non-leaves nodes """ import matplotlib.pyplot as plt plt.figure() sh = np.sort(self.height[self.isleaf() == False]) n = np.sum(self.isleaf() == False) plt.bar(np.arange(n), sh) def list_of_subtrees(self): """ returns the list of all non-trivial subtrees in the graph Caveat: this function assumes that the vertices are sorted in a way such that parent[i]>i for all i Only the leaves are listeed, not the subtrees themselves """ lst = [np.array([], np.int_) for i in range(self.V)] n = np.sum(self.isleaf()) for i in range(n): lst[i] = np.array([i], np.int_) for i in range(self.V - 1): j = self.parents[i] lst[j] = np.hstack((lst[i], lst[j])) return lst[n:self.V] #-------------------------------------------------------------------------- #------------- Average link clustering on a graph ------------------------- # ------------------------------------------------------------------------- def fusion(K, pop, i, j, k): """ Modifies the graph K to merge nodes i and j into nodes k The similarity values are weighted averaged, where pop[i] and pop[j] yield the relative weights. 
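Concretely, since pop[k] = pop[i] + pop[j], the similarity of the merged node to a neighbour m of both clusters becomes w(k, m) = (pop[i] * w(i, m) + pop[j] * w(j, m)) / pop[k].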
this is used in average_link_slow (deprecated) """ # fi = float(pop[i]) / (pop[k]) fj = 1.0 - fi # # replace i ny k # idxi = np.nonzero(K.edges[:, 0] == i) K.weights[idxi] = K.weights[idxi] * fi K.edges[idxi, 0] = k idxi = np.nonzero(K.edges[:, 1] == i) K.weights[idxi] = K.weights[idxi] * fi K.edges[idxi, 1] = k # # replace j by k # idxj = np.nonzero(K.edges[:, 0] == j) K.weights[idxj] = K.weights[idxj] * fj K.edges[idxj, 0] = k idxj = np.nonzero(K.edges[:, 1] == j) K.weights[idxj] = K.weights[idxj] * fj K.edges[idxj, 1] = k # #sum/remove double edges # #left side idxk = np.nonzero(K.edges[:, 0] == k)[0] corr = K.edges[idxk, 1] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i1 = idxk[acorr[a]] i2 = idxk[acorr[a + 1]] K.weights[i1] = K.weights[i1] + K.weights[i2] K.weights[i2] = - np.inf K.edges[i2] = -1 #right side idxk = np.nonzero(K.edges[:, 1] == k)[0] corr = K.edges[idxk, 0] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i1 = idxk[acorr[a]] i2 = idxk[acorr[a + 1]] K.weights[i1] = K.weights[i1] + K.weights[i2] K.weights[i2] = - np.inf K.edges[i2] = - 1 def average_link_graph(G): """ Agglomerative function based on a (hopefully sparse) similarity graph Parameters ---------- G the input graph Returns ------- t a weightForest structure that represents the dendrogram of the data CAVEAT ------ In that case, the homogeneity is associated with high similarity (as opposed to low cost as in most clustering procedures, e.g. distance-based procedures). Thus the tree is created with negated affinity values, in roder to respect the traditional ordering of cluster potentials. individual points have the potential (-np.inf). This problem is handled transparently in the associated segment function. """ warn('Function average_link_graph deprecated, will be removed', FutureWarning, stacklevel=2) # prepare a graph with twice the number of vertices n = G.V nbcc = G.cc().max() + 1 K = WeightedGraph(2 * G.V) K.E = G.E K.edges = G.edges.copy() K.weights = G.weights.copy() parent = np.arange(2 * n - nbcc, dtype=np.int_) pop = np.ones(2 * n - nbcc, np.int_) height = np.inf * np.ones(2 * n - nbcc) # iteratively merge clusters for q in range(n - nbcc): # 1. find the heaviest edge m = (K.weights).argmax() cost = K.weights[m] k = q + n height[k] = cost i = K.edges[m, 0] j = K.edges[m, 1] # 2. remove the current edge K.edges[m] = -1 K.weights[m] = - np.inf m = np.nonzero((K.edges[:, 0] == j) * (K.edges[:, 1] == i))[0] K.edges[m] = - 1 K.weights[m] = - np.inf # 3. 
merge the edges with third part edges parent[i] = k parent[j] = k pop[k] = pop[i] + pop[j] fusion(K, pop, i, j, k) height[height < 0] = 0 height[np.isinf(height)] = height[n] + 1 t = WeightedForest(2 * n - nbcc, parent, - height) return t def average_link_graph_segment(G, stop=0, qmax=1, verbose=False): """Agglomerative function based on a (hopefully sparse) similarity graph Parameters ---------- G the input graph stop: float the stopping criterion qmax: int, optional the number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V) a labelling of the graph vertices according to the criterion cost: array of shape (G.V (?)) the cost of each merge step during the clustering procedure """ warn('Function average_link_graph_segment deprecated, will be removed', FutureWarning, stacklevel=2) # prepare a graph with twice the number of vertices n = G.V if qmax == - 1: qmax = n qmax = int(np.minimum(qmax, n)) t = average_link_graph(G) if verbose: t.plot() u1 = np.zeros(n, np.int_) u2 = np.zeros(n, np.int_) if stop >= 0: u1 = t.partition( - stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = - t.get_height() cost = cost[t.isleaf() == False] return u, cost #-------------------------------------------------------------------------- #------------- Ward's algorithm with graph constraints -------------------- # ------------------------------------------------------------------------- def _inertia_(i, j, Features): """ Compute the variance of the set which is the concatenation of Feature[i] and Features[j] """ if np.size(np.shape(Features[i])) < 2: print(i, np.shape(Features[i]), Features[i]) if np.size(np.shape(Features[i])) < 2: print(j, np.shape(Features[j]), Features[j]) if np.shape(Features[i])[1] != np.shape(Features[j])[1]: print(i, j, np.shape(Features[i]), np.shape(Features[j])) localset = np.vstack((Features[i], Features[j])) return np.var(localset, 0).sum() def _inertia(i, j, Features): """ Compute the variance of the set which is the concatenation of Feature[i] and Features[j] """ n = Features[0][i] + Features[0][j] s = Features[1][i] + Features[1][j] q = Features[2][i] + Features[2][j] return np.sum(q - (s ** 2 / n)) def _initial_inertia(K, Features, seeds=None): """ Compute the variance associated with each edge-related pair of vertices The result is written in K;weights if seeds if provided (seeds!=None) this is done only for vertices adjacent to the seeds """ if seeds is None: for e in range(K.E): i = K.edges[e, 0] j = K.edges[e, 1] ESS = _inertia(i, j, Features) K.weights[e] = ESS else: aux = np.zeros(K.V).astype('bool') aux[seeds] = 1 for e in range(K.E): i = K.edges[e, 0] j = K.edges[e, 1] if (aux[i] or aux[j]): K.weights[e] = _inertia(i, j, Features) else: K.weights[e] = np.inf def _auxiliary_graph(G, Features): """ prepare a graph with twice the number of vertices this graph will contain the connectivity information along the merges. 
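Agglomerating the n = G.V initial vertices takes n - 1 binary merges, each creating one new node, which is why 2 * G.V - 1 vertices are allocated.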
""" K = WeightedGraph(2 * G.V - 1) K.E = G.E K.edges = G.edges.copy() K.weights = np.ones(K.E) K.symmeterize() if K.E > 0: valid = K.edges[:, 0] < K.edges[:, 1] K.remove_edges(valid) # K.remove_trivial_edges() _initial_inertia(K, Features) return K def _remap(K, i, j, k, Features, linc, rinc): """Modifies the graph K to merge nodes i and j into nodes k the graph weights are modified accordingly Parameters ---------- K graph instance: the existing graphical model i,j,k: int indexes of the nodes to be merged and of the parent respectively Features: list of node-per-node features linc: array of shape(K.V) left incidence matrix rinc: array of shape(K.V) right incidencematrix """ # ------- # replace i by k # -------- idxi = np.array(linc[i]).astype(np.int_) if np.size(idxi) > 1: for l in idxi: K.weights[l] = _inertia(k, K.edges[l, 1], Features) elif np.size(idxi) == 1: K.weights[idxi] = _inertia(k, K.edges[idxi, 1], Features) if np.size(idxi) > 0: K.edges[idxi, 0] = k idxi = np.array(rinc[i]).astype(np.int_) if np.size(idxi) > 1: for l in idxi: K.weights[l] = _inertia(K.edges[l, 0], k, Features) elif np.size(idxi) == 1: K.weights[idxi] = _inertia(K.edges[idxi, 0], k, Features) if np.size(idxi) > 0: K.edges[idxi, 1] = k #------ # replace j by k #------- idxj = np.array(linc[j]).astype(np.int_) if np.size(idxj) > 1: for l in idxj: K.weights[l] = _inertia(k, K.edges[l, 1], Features) elif np.size(idxj) == 1: K.weights[idxj] = _inertia(k, K.edges[idxj, 1], Features) if np.size(idxj) > 0: K.edges[idxj, 0] = k idxj = np.array(rinc[j]).astype(np.int_) if np.size(idxj) > 1: for l in idxj: K.weights[l] = _inertia(k, K.edges[l, 0], Features) elif np.size(idxj) == 1: K.weights[idxj] = _inertia(k, K.edges[idxj, 0], Features) if np.size(idxj) > 0: K.edges[idxj, 1] = k #------ # update linc, rinc #------ lidxk = list(linc[j]) + list(linc[i]) for L in lidxk: if K.edges[L, 1] == -1: lidxk.remove(L) linc[k] = lidxk linc[i] = [] linc[j] = [] ridxk = list(rinc[j]) + list(rinc[i]) for L in ridxk: if K.edges[L, 0] == -1: ridxk.remove(L) rinc[k] = ridxk rinc[i] = [] rinc[j] = [] #------ #remove double edges #------ #left side idxk = np.array(linc[k]).astype(np.int_) if np.size(idxk) > 0: corr = K.edges[idxk, 1] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i2 = idxk[acorr[a + 1]] K.weights[i2] = np.inf rinc[K.edges[i2, 1]].remove(i2) K.edges[i2] = - 1 linc[k].remove(i2) #right side idxk = np.array(rinc[k]).astype(np.int_) if np.size(idxk) > 0: corr = K.edges[idxk, 0] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i2 = idxk[acorr[a + 1]] K.weights[i2] = np.inf linc[K.edges[i2, 0]].remove(i2) K.edges[i2] = - 1 rinc[k].remove(i2) return linc, rinc def ward_quick(G, feature, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. 
Parameters ---------- G : graph instance topology-defining graph feature: array of shape (G.V,dim_feature) some vectorial information related to the graph vertices verbose : bool, optional If True, print diagnostic information Returns ------- t: weightForest instance that represents the dendrogram of the data Notes ----- Hopefully a quicker version A Euclidean distance is used in the feature space Caveat : only approximate """ warn('Function ward_quick from ' 'nipy.algorithms.clustering.hierarchical_clustering ' 'deprecated, will be removed', FutureWarning, stacklevel=2) # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])), np.zeros((2 * G.V, feature.shape[1]))] Features[1][:G.V] = feature Features[2][:G.V] = feature ** 2 n = G.V nbcc = G.cc().max() + 1 # prepare a graph with twice the number of vertices K = _auxiliary_graph(G, Features) parent = np.arange(2 * n - nbcc).astype(np.int_) height = np.zeros(2 * n - nbcc) linc = K.left_incidence() rinc = K.right_incidence() # iteratively merge clusters q = 0 while (q < n - nbcc): # 1. find the lightest edges aux = np.zeros(2 * n) ape = np.nonzero(K.weights < np.inf) ape = np.reshape(ape, np.size(ape)) idx = np.argsort(K.weights[ape]) for e in range(n - nbcc - q): i, j = K.edges[ape[idx[e]], 0], K.edges[ape[idx[e]], 1] if (aux[i] == 1) or (aux[j] == 1): break aux[i] = 1 aux[j] = 1 emax = np.maximum(e, 1) for e in range(emax): m = ape[idx[e]] cost = K.weights[m] k = q + n i = K.edges[m, 0] j = K.edges[m, 1] height[k] = cost if verbose: print(q, i, j, m, cost) # 2. remove the current edge K.edges[m] = -1 K.weights[m] = np.inf linc[i].remove(m) rinc[j].remove(m) ml = linc[j] if np.sum(K.edges[ml, 1] == i) > 0: m = ml[int(np.flatnonzero(K.edges[ml, 1] == i))] K.edges[m] = -1 K.weights[m] = np.inf linc[j].remove(m) rinc[i].remove(m) # 3. merge the edges with third part edges parent[i] = k parent[j] = k for p in range(3): Features[p][k] = Features[p][i] + Features[p][j] linc, rinc = _remap(K, i, j, k, Features, linc, rinc) q += 1 # build a tree to encode the results t = WeightedForest(2 * n - nbcc, parent, height) return t def ward_field_segment(F, stop=-1, qmax=-1, verbose=False): """Agglomerative function based on a field structure Parameters ---------- F the input field (graph+feature) stop: float, optional the stopping criterion. if stop==-1, then no stopping criterion is used qmax: int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (F.V) labelling of the graph vertices according to the criterion cost: array of shape (F.V - 1) the cost of each merge step during the clustering procedure Notes ----- See ward_quick_segment for more information Caveat : only approximate """ u, cost = ward_quick_segment(F, F.field, stop, qmax, verbose) return u, cost def ward_quick_segment(G, feature, stop=-1, qmax=1, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix.
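For illustration, a typical call (a sketch following the package tests;
``G`` is a topological graph such as the ``knn`` graph shown for
``ward_quick`` above, and ``X`` the feature array)::

    u, cost = ward_quick_segment(G, X, stop=-1, qmax=2)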
Parameters ---------- G: WeightedGraph instance the input graph (a topological graph essentially) feature: array of shape (G.V,dim_feature) vectorial information related to the graph vertices stop : int or float, optional the stopping criterion. if stop==-1, then no stopping criterion is used qmax : int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V) labelling of the graph vertices according to the criterion cost: array of shape (G.V - 1) the cost of each merge step during the clustering procedure Notes ----- Hopefully a quicker version A Euclidean distance is used in the feature space Caveat : only approximate """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") n = G.V if stop == - 1: stop = np.inf qmax = int(np.minimum(qmax, n - 1)) t = ward_quick(G, feature, verbose) if verbose: t.plot() u1 = np.zeros(n, np.int_) u2 = np.zeros(n, np.int_) if stop >= 0: u1 = t.partition(stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = t.get_height() cost = cost[t.isleaf() == False] return u, cost def ward_segment(G, feature, stop=-1, qmax=1, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. Parameters ---------- G : graph object the input graph (a topological graph essentially) feature : array of shape (G.V,dim_feature) some vectorial information related to the graph vertices stop : int or float, optional the stopping criterion. if stop==-1, then no stopping criterion is used qmax : int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V): a labelling of the graph vertices according to the criterion cost: array of shape (G.V - 1) the cost of each merge step during the clustering procedure Notes ----- A Euclidean distance is used in the feature space Caveat : when the number of cc in G (nbcc) is greater than qmax, u contains nbcc values, not qmax ! """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") # prepare a graph with twice the number of vertices n = G.V if qmax == -1: qmax = n - 1 if stop == -1: stop = np.inf qmax = int(np.minimum(qmax, n - 1)) t = ward(G, feature, verbose) u1 = np.zeros(n, np.int_) u2 = np.zeros(n, np.int_) if stop >= 0: u1 = t.partition(stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = t.get_height() cost = cost[t.isleaf() == False] return u, cost def ward(G, feature, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. Parameters ---------- G : graph the input graph (a topological graph essentially) feature : array of shape (G.V,dim_feature) vectorial information related to the graph vertices verbose : bool, optional If True, print diagnostic information Returns ------- t : ``WeightedForest`` instance structure that represents the dendrogram Notes ----- When G has more than 1 connected component, t is no longer a tree.
This case is handled cleanly now. """ warn('Function ward from ' 'nipy.algorithms.clustering.hierarchical_clustering ' 'deprecated, will be removed', FutureWarning, stacklevel=2) # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])), np.zeros((2 * G.V, feature.shape[1]))] Features[1][:G.V] = feature Features[2][:G.V] = feature ** 2 # prepare a graph with twice the number of vertices # this graph will contain the connectivity information # along the merges. n = G.V nbcc = G.cc().max() + 1 K = _auxiliary_graph(G, Features) # prepare some variables that are useful to speed up the algorithm parent = np.arange(2 * n - nbcc).astype(np.int_) height = np.zeros(2 * n - nbcc) linc = K.left_incidence() rinc = K.right_incidence() # iteratively merge clusters for q in range(n - nbcc): # 1. find the lightest edge m = (K.weights).argmin() cost = K.weights[m] k = q + n i = K.edges[m, 0] j = K.edges[m, 1] height[k] = cost if verbose: print(q, i, j, m, cost) # 2. remove the current edge K.edges[m] = - 1 K.weights[m] = np.inf linc[i].remove(m) rinc[j].remove(m) ml = linc[j] if np.sum(K.edges[ml, 1] == i) > 0: m = ml[int(np.flatnonzero(K.edges[ml, 1] == i))] K.edges[m] = -1 K.weights[m] = np.inf linc[j].remove(m) rinc[i].remove(m) # 3. merge the edges with third part edges parent[i] = k parent[j] = k for p in range(3): Features[p][k] = Features[p][i] + Features[p][j] linc, rinc = _remap(K, i, j, k, Features, linc, rinc) # build a tree to encode the results t = WeightedForest(2 * n - nbcc, parent, height) return t #-------------------------------------------------------------------------- #----------------------- Visualization ------------------------------------ # ------------------------------------------------------------------------- def _label_(f, parent, left, labelled): temp = np.nonzero(parent == f) if np.size(temp) > 0: i = temp[0][np.nonzero(left[temp[0]] == 1)] j = temp[0][np.nonzero(left[temp[0]] == 0)] labelled = _label_(i, parent, left, labelled) labelled[f] = labelled.max() + 1 labelled = _label_(j, parent, left, labelled) if labelled[f] < 0: labelled[f] = labelled.max() + 1 return labelled def _label(parent): # find the root root = np.nonzero(parent == np.arange(np.size(parent)))[0] # define left left = np.zeros(np.size(parent)) for f in range(np.size(parent)): temp = np.nonzero(parent == f) if np.size(temp) > 0: left[temp[0][0]] = 1 left[root] = .5 # define labelled labelled = - np.ones(np.size(parent)) # compute labelled for j in range(np.size(root)): labelled = _label_(root[j], parent, left, labelled) return labelled nipy-0.6.1/nipy/algorithms/clustering/imm.py000066400000000000000000000531671470056100100211440ustar00rootroot00000000000000""" Infinite mixture model : A generalization of Bayesian mixture models with an unspecified number of classes """ import math import numpy as np from scipy.special import gammaln from .bgmm import BGMM, detsh def co_labelling(z, kmax=None, kmin=None): """ return a sparse co-labelling matrix given the label vector z Parameters ---------- z: array of shape(n_samples), the input labels kmax: int, optional, considers only the labels in the range [0, kmax[ kmin: int, optional, considers only the labels strictly greater than kmin Returns ------- colabel: a sparse coo_matrix, yields the co labelling of the data i.e.
c[i,j]= 1 if z[i]==z[j], 0 otherwise """ from scipy.sparse import coo_matrix n = z.size colabel = coo_matrix((n, n)) if kmax is None: kmax = z.max() + 1 if kmin is None: kmin = z.min() - 1 for k in np.unique(z): if (k < kmax) & (k > kmin): i = np.array(np.nonzero(z == k)) row = np.repeat(i, i.size) col = np.ravel(np.tile(i, i.size)) data = np.ones((i.size) ** 2) colabel = colabel + coo_matrix((data, (row, col)), shape=(n, n)) return colabel class IMM(BGMM): """ The class implements Infinite Gaussian Mixture model or Dirichlet Process Mixture model. This is simply a generalization of Bayesian Gaussian Mixture Models with an unknown number of classes. """ def __init__(self, alpha=.5, dim=1): """ Parameters ---------- alpha: float, optional, the parameter for cluster creation dim: int, optional, the dimension of the data Note: use the function set_priors() to set adapted priors """ self.dim = dim self.alpha = alpha self.k = 0 self.prec_type = 'full' # initialize weights self.weights = [1] def set_priors(self, x): """ Set the priors in order to have them weakly uninformative this is from Fraley and Raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x, array of shape (n_samples,self.dim) the data used in the estimation process """ # a few parameters small = 0.01 elshape = (1, self.dim, self.dim) mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.maximum(1.e-15, np.dot(dx.T, dx) / x.shape[0]) px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape) # set the priors self._prior_means = mx self.prior_means = mx self.prior_weights = self.alpha self._prior_scale = px self.prior_scale = px self._prior_dof = self.dim + 2 self.prior_dof = [self._prior_dof] self._prior_shrinkage = small self.prior_shrinkage = [self._prior_shrinkage] # cache some pre-computations self._dets_ = detsh(px[0]) self._dets = [self._dets_] self._inv_prior_scale_ = np.reshape(np.linalg.inv(px[0]), elshape) self.prior_dens = None def set_constant_densities(self, prior_dens=None): """Set the null and prior densities as constant (assuming a compact domain) Parameters ---------- prior_dens: float, optional constant for the prior density """ self.prior_dens = prior_dens def sample(self, x, niter=1, sampling_points=None, init=False, kfold=None, verbose=0): """sample the indicator and parameters Parameters ---------- x: array of shape (n_samples, self.dim) the data used in the estimation process niter: int, the number of iterations to perform sampling_points: array of shape(nbpoints, self.dim), optional points where the likelihood will be sampled this defaults to x kfold: int or array, optional, parameter of cross-validation control; by default, no cross-validation is used and the procedure is faster but less accurate verbose=0: verbosity mode Returns ------- likelihood: array of shape(nbpoints) total likelihood of the model """ self.check_x(x) if sampling_points is None: average_like = np.zeros(x.shape[0]) else: average_like = np.zeros(sampling_points.shape[0]) splike = self.likelihood_under_the_prior(sampling_points) plike = self.likelihood_under_the_prior(x) if init: self.k = 1 z = np.zeros(x.shape[0]) self.update(x, z) like = self.likelihood(x, plike) z = self.sample_indicator(like) for i in range(niter): if kfold is None: like = self.simple_update(x, z, plike) else: like = self.cross_validated_update(x, z, plike, kfold) if sampling_points is None: average_like += like else: average_like += np.sum( self.likelihood(sampling_points, splike), 1) average_like /= niter return average_like def
simple_update(self, x, z, plike): """ This is one step of the sampling procedure (a single data sweep, without cross-validation) Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior Returns ------- like: array of shape(n_samples), the likelihood of the data """ like = self.likelihood(x, plike) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z = self.sample_indicator(like) # almost standard, but many new components can be created self.reduce(z) self.update(x, z) return like.sum(1) def cross_validated_update(self, x, z, plike, kfold=10): """ This is a step in the sampling procedure that uses internal cross-validation Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior kfold: int, or array of shape(n_samples), optional, folds in the cross-validation loop Returns ------- like: array of shape(n_samples), the (cross-validated) likelihood of the data """ n_samples = x.shape[0] slike = np.zeros(n_samples) if np.isscalar(kfold): aux = np.argsort(np.random.rand(n_samples)) idx = - np.ones(n_samples).astype(np.int_) j = int(math.ceil(n_samples / kfold)) kmax = kfold for k in range(kmax): idx[aux[k * j:min(n_samples, j * (k + 1))]] = k else: if np.array(kfold).size != n_samples: raise ValueError('kfold and x do not have the same size') uk = np.unique(kfold) np.random.shuffle(uk) idx = np.zeros(n_samples).astype(np.int_) for i, k in enumerate(uk): idx += (i * (kfold == k)) kmax = uk.max() + 1 for k in range(kmax): test = np.zeros(n_samples).astype('bool') test[idx == k] = 1 train = np.logical_not(test) # remove a fraction of the data # and re-estimate the clusters z[train] = self.reduce(z[train]) self.update(x[train], z[train]) # draw the membership for the left-out data alike = self.likelihood(x[test], plike[test]) slike[test] = alike.sum(1) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z[test] = self.sample_indicator(alike) # almost standard, but many new components can be created return slike def reduce(self, z): """Reduce the assignments by removing empty clusters and update self.k Parameters ---------- z: array of shape(n), a vector of membership variables changed in place Returns ------- z: the remapped values """ uz = np.unique(z[z > - 1]) for i, k in enumerate(uz): z[z == k] = i self.k = z.max() + 1 return z def update(self, x, z): """ Update function (draw a sample of the IMM parameters) Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process z array of shape (n_samples), type = np.int_ the corresponding classification """ # re-dimension the priors in order to match self.k self.prior_means = np.repeat(self._prior_means, self.k, 0) self.prior_dof = self._prior_dof * np.ones(self.k) self.prior_shrinkage = self._prior_shrinkage * np.ones(self.k) self._dets = self._dets_ * np.ones(self.k) self._inv_prior_scale = np.repeat(self._inv_prior_scale_, self.k, 0) # initialize some variables self.means = np.zeros((self.k, self.dim)) self.precisions = np.zeros((self.k, self.dim, self.dim)) # proceed with the update BGMM.update(self, x, z) def update_weights(self, z): """ Given the allocation vector z, resample the weights parameter Parameters ---------- z array of shape (n_samples), type = np.int_ the allocation
variable """ pop = np.hstack((self.pop(z), 0)) self.weights = pop + self.prior_weights self.weights /= self.weights.sum() def sample_indicator(self, like): """ Sample the indicator from the likelihood Parameters ---------- like: array of shape (nbitem,self.k) component-wise likelihood Returns ------- z: array of shape(nbitem): a draw of the membership variable Notes ----- The behaviour is different from standard bgmm in that z can take arbitrary values """ z = BGMM.sample_indicator(self, like) z[z == self.k] = self.k + np.arange(np.sum(z == self.k)) return z def likelihood_under_the_prior(self, x): """ Computes the likelihood of x under the prior Parameters ---------- x, array of shape (self.n_samples,self.dim) returns ------- w, the likelihood of x under the prior model (unweighted) """ if self.prior_dens is not None: return self.prior_dens * np.ones(x.shape[0]) a = self._prior_dof tau = self._prior_shrinkage tau /= (1 + tau) m = self._prior_means b = self._prior_scale ib = np.linalg.inv(b[0]) ldb = np.log(detsh(b[0])) scalar_w = np.log(tau / np.pi) * self.dim scalar_w += 2 * gammaln((a + 1) / 2) scalar_w -= 2 * gammaln((a - self.dim) / 2) scalar_w -= ldb * a w = scalar_w * np.ones(x.shape[0]) for i in range(x.shape[0]): w[i] -= (a + 1) * np.log(detsh(ib + tau * (m - x[i:i + 1]) * (m - x[i:i + 1]).T)) w /= 2 return np.exp(w) def likelihood(self, x, plike=None): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x: array of shape (n_samples, self.dim), the data used in the estimation process plike: array of shape (n_samples), optional, the density of each point under the prior Returns ------- like, array of shape (nbitem, self.k) component-wise likelihood """ if plike is None: plike = self.likelihood_under_the_prior(x) plike = np.reshape(plike, (x.shape[0], 1)) if self.k > 0: like = self.unweighted_likelihood(x) like = np.hstack((like, plike)) else: like = plike like *= self.weights return like class MixedIMM(IMM): """ Particular IMM with an additional null class. The data is supplied together with a sample-related probability of being under the null. 
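Examples
--------
A minimal sampling sketch (illustrative values, modelled on the package
tests; it assumes 1D data and a constant null density)::

    import numpy as np

    x = np.random.rand(50, 1)
    null_proba = 0.5 * np.ones(50)
    model = MixedIMM(alpha=0.5, dim=1)
    model.set_priors(x)
    model.set_constant_densities(null_dens=1.)
    # burn-in, then sampling; pproba is the posterior of the null class
    model.sample(x, null_class_proba=null_proba, niter=100, init=True)
    like, pproba = model.sample(x, null_class_proba=null_proba, niter=300)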
""" def __init__(self, alpha=.5, dim=1): """ Parameters ---------- alpha: float, optional, the parameter for cluster creation dim: int, optional, the dimension of the the data Note: use the function set_priors() to set adapted priors """ IMM.__init__(self, alpha, dim) def set_constant_densities(self, null_dens=None, prior_dens=None): """ Set the null and prior densities as constant (over a supposedly compact domain) Parameters ---------- null_dens: float, optional constant for the null density prior_dens: float, optional constant for the prior density """ self.null_dens = null_dens self.prior_dens = prior_dens def sample(self, x, null_class_proba, niter=1, sampling_points=None, init=False, kfold=None, co_clustering=False, verbose=0): """ sample the indicator and parameters Parameters ---------- x: array of shape (n_samples, self.dim), the data used in the estimation process null_class_proba: array of shape(n_samples), the probability to be under the null niter: int, the number of iterations to perform sampling_points: array of shape(nbpoints, self.dim), optional points where the likelihood will be sampled this defaults to x kfold: int, optional, parameter of cross-validation control by default, no cross-validation is used the procedure is faster but less accurate co_clustering: bool, optional if True, return a model of data co-labelling across iterations verbose=0: verbosity mode Returns ------- likelihood: array of shape(nbpoints) total likelihood of the model pproba: array of shape(n_samples), the posterior of being in the null (the posterior of null_class_proba) coclust: only if co_clustering==True, sparse_matrix of shape (n_samples, n_samples), frequency of co-labelling of each sample pairs across iterations """ self.check_x(x) pproba = np.zeros(x.shape[0]) if sampling_points is None: average_like = np.zeros(x.shape[0]) else: average_like = np.zeros(sampling_points.shape[0]) splike = self.likelihood_under_the_prior(sampling_points) plike = self.likelihood_under_the_prior(x) if init: self.k = 1 z = np.zeros(x.shape[0]) self.update(x, z) like = self.likelihood(x, plike) z = self.sample_indicator(like, null_class_proba) if co_clustering: from scipy.sparse import coo_matrix coclust = coo_matrix((x.shape[0], x.shape[0])) for i in range(niter): if kfold is None: like = self.simple_update(x, z, plike, null_class_proba) else: like, z = self.cross_validated_update(x, z, plike, null_class_proba, kfold) llike = self.likelihood(x, plike) z = self.sample_indicator(llike, null_class_proba) pproba += (z == - 1) if co_clustering: coclust = coclust + co_labelling(z, self.k, -1) if sampling_points is None: average_like += like else: average_like += np.sum( self.likelihood(sampling_points, splike), 1) average_like /= niter pproba /= niter if co_clustering: coclust /= niter return average_like, pproba, coclust return average_like, pproba def simple_update(self, x, z, plike, null_class_proba): """ One step in the sampling procedure (one data sweep) Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior null_class_proba: array of shape(n_samples), prior probability to be under the null Returns ------- like: array od shape(n_samples), the likelihood of the data under the H1 hypothesis """ like = self.likelihood(x, plike) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z = self.sample_indicator(like, null_class_proba) # almost 
standard, but many new components can be created self.reduce(z) self.update(x, z) return like.sum(1) def cross_validated_update(self, x, z, plike, null_class_proba, kfold=10): """ This is a step in the sampling procedure that uses internal cross-validation Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior kfold: int, optional, or array number of folds in cross-validation loop or set of indexes for the cross-validation procedure null_class_proba: array of shape(n_samples), prior probability of being under the null Returns ------- like: array of shape(n_samples), the (cross-validated) likelihood of the data z: array of shape(n_samples), the associated membership variables Notes ----- When kfold is an array, there is an internal reshuffling to randomize the order of updates """ n_samples = x.shape[0] slike = np.zeros(n_samples) if np.isscalar(kfold): aux = np.argsort(np.random.rand(n_samples)) idx = - np.ones(n_samples).astype(np.int_) j = int(math.ceil(n_samples / kfold)) kmax = kfold for k in range(kmax): idx[aux[k * j:min(n_samples, j * (k + 1))]] = k else: if np.array(kfold).size != n_samples: raise ValueError('kfold and x do not have the same size') uk = np.unique(kfold) np.random.shuffle(uk) idx = np.zeros(n_samples).astype(np.int_) for i, k in enumerate(uk): idx += (i * (kfold == k)) kmax = uk.max() + 1 for k in range(kmax): # split at iteration k test = np.zeros(n_samples).astype('bool') test[idx == k] = 1 train = np.logical_not(test) # remove a fraction of the data # and re-estimate the clusters z[train] = self.reduce(z[train]) self.update(x[train], z[train]) # draw the membership for the left-out data alike = self.likelihood(x[test], plike[test]) slike[test] = alike.sum(1) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z[test] = self.sample_indicator(alike, null_class_proba[test]) # almost standard, but many new components can be created return slike, z def sample_indicator(self, like, null_class_proba): """ Sample the indicator from the likelihood Parameters ---------- like: array of shape (nbitem,self.k) component-wise likelihood null_class_proba: array of shape(n_samples), prior probability of being under the null Returns ------- z: array of shape(nbitem): a draw of the membership variable Notes ----- Here z=-1 encodes for the null class """ n = like.shape[0] conditional_like_1 = ((1 - null_class_proba) * like.T).T conditional_like_0 = np.reshape(null_class_proba * self.null_dens, (n, 1)) conditional_like = np.hstack((conditional_like_0, conditional_like_1)) z = BGMM.sample_indicator(self, conditional_like) - 1 z[z == self.k] = self.k + np.arange(np.sum(z == self.k)) return z def main(): """ Illustrative example of the behaviour of imm """ n = 100 dim = 2 alpha = .5 aff = np.random.randn(dim, dim) x = np.dot(np.random.randn(n, dim), aff) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, kfold=10) print('number of components: ', igmm.k) # print('number of components: ', igmm.k) if dim < 3: from .gmm import plot2D plot2D(x, igmm, verbose=1) return igmm if __name__ == '__main__': main() nipy-0.6.1/nipy/algorithms/clustering/meson.build000066400000000000000000000004441470056100100221420ustar00rootroot00000000000000target_dir = 'nipy/algorithms/clustering' python_sources = [ '__init__.py', 'bgmm.py', 'ggmixture.py', 'gmm.py', 'hierarchical_clustering.py', 'imm.py', 'utils.py',
'von_mises_fisher_mixture.py' ] py3.install_sources( python_sources, pure: false, subdir: target_dir ) nipy-0.6.1/nipy/algorithms/clustering/tests/000077500000000000000000000000001470056100100211405ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/clustering/tests/__init__.py000066400000000000000000000000001470056100100232370ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/clustering/tests/test_bgmm.py000066400000000000000000000113371470056100100235000ustar00rootroot00000000000000""" # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: Test the Bayesian GMM. fixme : some of these tests take too much time at the moment to be real unit tests Author : Bertrand Thirion, 2009 """ import numpy as np import numpy.random as nr from ..bgmm import BGMM, VBGMM, dirichlet_eval, dkl_gaussian, multinomial def test_dirichlet_eval(): # check that the Dirichlet evaluation function sums to one on a simple # example alpha = np.array([0.5, 0.5]) sd = 0 for i in range(10000): e = i * 0.0001 + 0.00005 sd += dirichlet_eval(np.array([e, 1 - e]), alpha) assert np.absolute(sd.sum() * 0.0001 - 1) < 0.01 def test_multinomial(): """ test of the generate_multinomial function: check that is sums to 1 in a simple case """ n_samples = 100000 n_classes = 5 aux = np.reshape(np.random.rand(n_classes), (1, n_classes)) aux /= aux.sum() likelihood = np.repeat(aux, n_samples, 0) z = multinomial(likelihood) res = np.array([np.sum(z == k) for k in range(n_classes)]) res = res * 1.0 / n_samples assert np.sum((aux-res) ** 2) < 1.e-4 def test_dkln1(): dim = 3 m1 = np.zeros(dim) P1 = np.eye(dim) m2 = m1 P2 = P1 assert dkl_gaussian(m1, P1, m2, P2) == 0 def test_dkln2(): dim, offset = 3, 4. m1 = np.zeros(dim) P1 = np.eye(dim) m2 = offset * np.ones(dim) P2 = np.eye(dim) assert dkl_gaussian(m1, P1, m2, P2) == .5 * dim * offset ** 2 def test_dkln3(): dim, scale = 3, 4 m1, m2 = np.zeros(dim), np.zeros(dim) P1, P2 = np.eye(dim), scale * np.eye(dim) test1 = .5 * (dim * np.log(scale) + dim * (1. / scale - 1)) test2 = .5 * (-dim * np.log(scale) + dim * (scale - 1)) assert dkl_gaussian(m1, P1, m2, P2) == test2 def test_bgmm_gibbs(): # Perform the estimation of a gmm using Gibbs sampling n_samples, k, dim, niter, offset = 100, 2, 2, 1000, 2. x = nr.randn(n_samples,dim) x[:30] += offset b = BGMM(k,dim) b.guess_priors(x) b.initialize(x) b.sample(x, 1) w, cent, prec, pz = b.sample(x, niter, mem=1) b.plugin(cent, prec, w) z = pz[:, 0] # fixme : find a less trivial test assert z.max() + 1 == b.k def test_gmm_bf(kmax=4, seed=1): """ Perform a model selection procedure on a gmm with Bayes factor estimations Parameters ---------- kmax : range of values that are tested seed=False: int, optional If seed is not False, the random number generator is initialized at a certain value fixme : this one often fails. 
I don't really see why """ n_samples, dim, niter = 30, 2, 1000 if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr x = nr.randn(n_samples, dim) bbf = -np.inf for k in range(1, kmax): b = BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfk = bplugin.bayes_factor(x, pz.astype(np.int_)) if bfk > bbf: bestk = k bbf = bfk assert bestk < 3 def test_vbgmm(): """perform the estimation of a variational gmm """ n_samples, dim, offset, k = 100, 2, 2, 2 x = nr.randn(n_samples, dim) x[:30] += offset b = VBGMM(k,dim) b.guess_priors(x) b.initialize(x) b.estimate(x) z = b.map_label(x) # fixme : find a less trivial test assert z.max() + 1 == b.k def test_vbgmm_select(kmax=6): """ perform the estimation of a variational gmm + model selection """ nr.seed([0]) n_samples, dim, offset=100, 3, 2 x = nr.randn(n_samples, dim) x[:30] += offset be = - np.inf for k in range(1, kmax): b = VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) ek = b.evidence(x) if ek > be: be = ek bestk = k assert bestk < 3 def test_evidence(k=1): """ Compare the evidence estimated by Chib's method with the variational evidence (free energy) fixme : this one really takes time """ np.random.seed(0) n_samples, dim, offset = 50, 2, 3 x = nr.randn(n_samples, dim) x[:15] += offset b = VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) vbe = b.evidence(x) niter = 1000 b = BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfchib = bplugin.bayes_factor(x, pz.astype(np.int_), 1) assert bfchib > vbe nipy-0.6.1/nipy/algorithms/clustering/tests/test_clustering.py000066400000000000000000000015021470056100100247260ustar00rootroot00000000000000 # to run only the simple tests: # python testClustering.py Test_Clustering from unittest import TestCase import numpy as np import numpy.random as nr from ..utils import kmeans class TestClustering(TestCase): def testkmeans1(self): X = nr.randn(10, 2) A = np.concatenate([np.ones((7, 2)),np.zeros((3, 2))]) X = X + 3 * A; L = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) C, L, J = kmeans(X, 2, L) self.assertLess(np.mean(L[:7]), 0.5) def testkmeans2(self): X = nr.randn(10000, 2) A = np.concatenate([np.ones((7000, 2)), np.zeros((3000, 2))]) X = X + 3 * A L = np.concatenate([np.ones(5000), np.zeros(5000)]).astype(np.int_) C, L, J = kmeans(X, 2, L) l = L[:7000].astype(np.float64) self.assertGreater(np.mean(l), 0.9) nipy-0.6.1/nipy/algorithms/clustering/tests/test_ggm.py000066400000000000000000000040201470056100100233170ustar00rootroot00000000000000 import numpy as np import numpy.random as nr import scipy.stats as st from ..ggmixture import GGGM, GGM, Gamma def test_GGM1(verbose=0): shape = 1 scale = 1 mean = 0 var = 1 G = GGM(shape,scale,mean,var) sx = 1000 x = -2.5 + nr.randn(sx) G.estimate(x) b = np.absolute(G.mean+2.5)<0.5 if verbose: #G.parameters() print(x.max()) assert(b) def test_GGM2(verbose=0): shape = 1 scale = 1 mean = 0 var = 1 G = GGM(shape,scale,mean,var) sx = 1000 x = -2.5 + nr.randn(sx) G.estimate(x) if verbose: G.parameters() b = np.absolute(G.mixt)<0.1 assert(b) def test_GGGM0(verbose=0, seed=1): G = GGGM() sx = 1000 #x = np.array([float(st.t.rvs(dof)) for i in range(sx)]) if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr x = nr.randn(sx) G.init(x) G.estimate(x) if verbose: 
G.parameters() assert(np.absolute(G.mean)<0.3) def test_GGGM1(verbose=0): G = GGGM() sx = 10000 x = np.array([float(st.t.rvs(5)) for i in range(sx)]) G.init_fdr(x) G.estimate(x) if verbose: G.parameters() assert(np.absolute(G.mean)<0.1) def test_GGGM2(verbose=0): G = GGGM() sx = 10000 x = nr.randn(sx) G.init_fdr(x) G.estimate(x) assert(G.mixt[1]>0.9) def test_GGGM3(verbose=0): G = GGGM() sx = 1000 x = 100 + np.array([float(st.t.rvs(5)) for i in range(sx)]) G.init(x) G.estimate(x) if verbose: G.parameters() assert(np.absolute(G.mixt[0])<1.e-15) def test_gamma_parameters1(verbose=0): import numpy.random as nr n = 1000 X = nr.gamma(11., 3., n) G = Gamma() G.estimate(X) if verbose: G.parameters() assert(np.absolute(G.shape-11)<2.) def test_gamma_parameters2(verbose=0): import numpy.random as nr n = 1000 X = nr.gamma(11., 3., n) G = Gamma() G.estimate(X) if verbose: G.parameters() assert(np.absolute(G.scale-3)<0.5) nipy-0.6.1/nipy/algorithms/clustering/tests/test_gmm.py000066400000000000000000000165001470056100100233330ustar00rootroot00000000000000 # to run only the simple tests: # python testClustering.py Test_Clustering import numpy as np from ..gmm import GMM, best_fitting_GMM # seed the random number generator to avoid rare random failures seed = 1 nr = np.random.RandomState([seed]) def test_em_loglike0(): # Test that the likelihood of the GMM is expected on standard data # 1-cluster model dim, k, n = 1, 1, 1000 x = nr.randn(n,dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi)) assert np.absolute(ll + ent) < 3. / np.sqrt(n) def test_em_loglike1(): # Test that the likelihood of the GMM is expected on standard data # 3-cluster model dim, k, n = 1, 3, 1000 x = nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi)) assert np.absolute(ll + ent) < 3. / np.sqrt(n) def test_em_loglike2(): # Test that the likelihood of the GMM is expected on standard data # non-centered data, non-unit variance dim, k, n = 1, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert np.absolute(ll + ent) < 3. / np.sqrt(n) def test_em_loglike3(): # Test that the likelihood of the GMM is expected on standard data # here dimension = 2 dim, k, n = 2, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n,dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert np.absolute(ll + ent) < dim * 3. / np.sqrt(n) def test_em_loglike4(): # Test that the likelihood of the GMM is expected on standard data # here dim = 5 dim, k, n = 5, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n, dim) lgmm = GMM(k,dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert np.absolute(ll + ent) < dim * 3. / np.sqrt(n) def test_em_loglike5(): # Test that the likelihood of the GMM is expected on standard data # Here test that this works also on test data generated iid dim, k, n = 2, 1, 1000 scale, offset = 3., 4. 
x = offset + scale * nr.randn(n, dim) y = offset + scale * nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(y) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert np.absolute(ll + ent) < dim * 3. / np.sqrt(n) def test_em_loglike6(): # Test that the likelihood of shifted data is lower # than the likelihood of non-shifted data dim, k, n = 1, 1, 100 offset = 3. x = nr.randn(n, dim) y = offset + nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll1 = lgmm.average_log_like(x) ll2 = lgmm.average_log_like(y) assert ll2 < ll1 def test_em_selection(): # test that the basic GMM-based model selection tool # returns something sensible # (i.e. the gmm used to represent the data has indeed one or two classes) dim = 2 x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) krange = list(range(1, 10)) lgmm = best_fitting_GMM(x, krange, prec_type='full', niter=100, delta = 1.e-4, ninit=1) assert lgmm.k < 4 def test_em_gmm_full(): # Computing the BIC value for different configurations # of a GMM with full covariance matrices # The BIC should be maximal for a number of classes of 1 or 2 # generate some data dim = 2 x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) # estimate different GMMs of that data maxiter, delta = 100, 1.e-4 bic = np.zeros(5) for k in range(1,6): lgmm = GMM(k, dim) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta) assert bic[4] < bic[1] def test_em_gmm_diag(): # Computing the BIC value for GMMs with different number of classes, # with diagonal covariance models # The BIC should be maximal for a number of classes of 1 or 2 # generate some data dim = 2 x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim))) # estimate different GMMs of that data maxiter, delta = 100, 1.e-8 prec_type = 'diag' bic = np.zeros(5) for k in range(1, 6): lgmm = GMM(k, dim, prec_type) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta) z = lgmm.map_label(x) assert z.max() + 1 == lgmm.k assert bic[4] < bic[1] def test_em_gmm_multi(): # Playing with various initializations on the same data # generate some data dim = 2 x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(100, dim))) # estimate different GMMs of that data maxiter, delta, ninit, k = 100, 1.e-4, 5, 2 lgmm = GMM(k,dim) bgmm = lgmm.initialize_and_estimate(x, niter=maxiter, delta=delta, ninit=ninit) bic = bgmm.evidence(x) assert np.isfinite(bic) def test_em_gmm_largedim(): # testing the GMM model in larger dimensions # generate some data dim = 10 x = nr.randn(100, dim) x[:30] += 2 # estimate different GMMs of that data maxiter, delta = 100, 1.e-4 for k in range(2, 3): lgmm = GMM(k,dim) bgmm = lgmm.initialize_and_estimate(x, None, maxiter, delta, ninit=5) z = bgmm.map_label(x) # define the correct labelling u = np.zeros(100) u[:30] = 1 # check the correlation between the true labelling # and the computed one eta = np.absolute(np.dot(z - z.mean(), u - u.mean()) /\ (np.std(z) * np.std(u) * 100)) assert eta > 0.3 def test_em_gmm_heterosc(): # testing the model on very ellipsoidal data: # compute the bic values for several values of k # and check that the maximal one is 1 or 2 # generate some data dim = 2 x = nr.randn(100, dim) x[:50] += 3 # estimate different GMMs of that data maxiter, delta = 100, 1.e-4 bic = np.zeros(5) for k in range(1,6): lgmm = GMM(k, dim) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta, 0) assert bic[4] < bic[1] def test_em_gmm_cv(): # Comparison of different GMMs
using cross-validation # generate some data dim = 2 xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim))) # estimate different GMMs for xtrain, and test it on xtest prec_type = 'full' k, maxiter, delta = 2, 300, 1.e-4 ll = [] # model 1 lgmm = GMM(k,dim,prec_type) lgmm.initialize(xtrain) bic = lgmm.estimate(xtrain,maxiter, delta) ll.append(lgmm.test(xtest).mean()) # model 2 prec_type = 'diag' lgmm = GMM(k, dim, prec_type) lgmm.initialize(xtrain) bic = lgmm.estimate(xtrain, maxiter, delta) ll.append(lgmm.test(xtest).mean()) for k in [1, 3, 10]: lgmm = GMM(k,dim,prec_type) lgmm.initialize(xtrain) ll.append(lgmm.test(xtest).mean()) assert ll[4] < ll[1] nipy-0.6.1/nipy/algorithms/clustering/tests/test_hierarchical_clustering.py000066400000000000000000000100641470056100100274270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Several basic tests for hierarchical clustering procedures. Should be cast soon in a nicer unittest framework Author : Bertrand Thirion, 2008-2009 """ import math import numpy as np from numpy.random import randn from nipy.algorithms.graph.field import field_from_graph_and_data from nipy.algorithms.graph.graph import knn from ..hierarchical_clustering import ( average_link_graph, average_link_graph_segment, ward, ward_field_segment, ward_quick, ward_quick_segment, ward_segment, ) def alg_test_basic(n=100,k=5): # Check that we obtain the correct solution in a simplistic case np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = average_link_graph(G) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w))==0 def alg_test_2(): # Do we handle the case of a graph with too many connected components? np.random.seed(0) n = 100 k = 5 x = np.random.randn(n, 2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x, k) t = average_link_graph(G) u = t.split(2) assert u.max()==2 def alg_test_3(n=100,k=5): # Check that we obtain the correct solution in a simplistic case np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) u, cost = average_link_graph_segment(G, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w))==0 def ward_test_basic(n=100,k=5): # Basic check of ward's algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = ward(G,x) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w))==0 def wardq_test_basic(n=100,k=5): # Basic check of ward's algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = ward_quick(G, x) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w))==0 def wardq_test_2(): # Do we handle the case of a graph with too many connected components?
np.random.seed(0) n = 100 k = 5 x = np.random.randn(n, 2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x, k) t = ward_quick(G, x) u = t.split(2) assert u.max() == 2 def wardf_test(n=100,k=5): np.random.seed(0) x = np.random.randn(n,2) x[:int(0.7*n)] += 3 G = knn(x, 5) F = field_from_graph_and_data(G, x) u, cost = ward_field_segment(F, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w)) == 0 def wards_test_basic(n=100,k=5): # Basic check of ward's segmentation algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) u,cost = ward_segment(G, x, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert np.sum(w*(1-w)) == 0 def wards_test_3(): # Check ward_segment np.random.seed(0) n = 100 k = 5 x = np.random.randn(n,2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x,k) u,cost = ward_segment(G, x, qmax=2) assert u.max() == 2 def cost_test(n=100, k=5): # check that cost.max() is equal to the data variance np.random.seed(0) x = np.random.randn(n, 2) G = knn(x, k) u, cost = ward_segment(G, x) assert np.abs(cost.max()/(n*np.var(x,0).sum()) - 1) < 1e-6 def ward_test_more(n=100, k=5, verbose=0): # Check that two implementations give the same result np.random.seed(0) X = randn(n,2) X[:int(math.ceil(n / 3))] += 5 G = knn(X, 5) u,c = ward_segment(G, X, stop=-1, qmax=1, verbose=verbose) u1,c = ward_segment(G, X, stop=-1, qmax=k, verbose=verbose) u,c = ward_quick_segment(G, X, stop=-1, qmax=1, verbose=verbose) u2,c = ward_quick_segment(G, X, stop=-1, qmax=k, verbose=verbose) assert np.sum(u1==u2) == n nipy-0.6.1/nipy/algorithms/clustering/tests/test_imm.py000066400000000000000000000144011470056100100233330ustar00rootroot00000000000000""" Test the Infinite GMM. Author : Bertrand Thirion, 2010 """ import numpy as np from numpy.testing import assert_array_equal from ..imm import IMM, MixedIMM, co_labelling def test_colabel(): # test the co_labelling functionality z = np.array([0,1,1,0,2]) c = co_labelling(z).todense() tc = np.array([[ 1., 0., 0., 1., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 1., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) assert_array_equal(c, tc) def test_imm_loglike_1D(): # Check that the log-likelihood of the data under the infinite gaussian # mixture model is close to the theoretical data likelihood n = 100 dim = 1 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert np.absolute(theoretical_ll-empirical_ll)<0.25*dim def test_imm_loglike_known_groups(): # Check that the log-likelihood of the data under IGMM close to theory n = 50 dim = 1 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) kfold = np.floor(np.random.rand(n)*5).astype(np.int_) # warming igmm.sample(x, niter=100) # sampling like = igmm.sample(x, niter=300, kfold=kfold) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert np.absolute(theoretical_ll-empirical_ll)<0.25*dim def test_imm_loglike_1D_k10(): # Check with k-fold cross validation (k=10) n = 50 dim = 1 alpha = .5 k = 5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, kfold=k) # sampling like = igmm.sample(x, niter=300, kfold=k) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() # Result susceptible to random number output. 
See: # https://github.com/nipy/nipy/issues/418 assert np.absolute(theoretical_ll-empirical_ll) < 0.27 * dim def test_imm_loglike_2D_fast(): # Faster version for log-likelihood imm n = 100 dim = 2 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert np.absolute(theoretical_ll-empirical_ll)<0.25*dim def test_imm_loglike_2D(): # Slower cross-validated logL check n = 50 dim = 2 alpha = .5 k = 5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True, kfold=k) # sampling like = igmm.sample(x, niter=300, kfold=k) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert np.absolute(theoretical_ll-empirical_ll)<0.25*dim def test_imm_loglike_2D_a0_1(): # Check with alpha=.1 n = 100 dim = 2 alpha = .1 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() print(theoretical_ll, empirical_ll) assert np.absolute(theoretical_ll-empirical_ll)<0.2*dim def test_imm_wnc(): # Test the basic imm_wnc n = 50 dim = 1 alpha = .5 g0 = 1. x = np.random.rand(n, dim) x[:int(.3 * n)] *= .2 x[:int(.1 * n)] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = 0.5*np.ones(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) g = np.reshape(np.linspace(0, 1, 101), (101, dim)) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300, sampling_points=g) # the density should sum to 1 ds = 0.01*like.sum() assert ds<1 assert ds>.8 assert np.sum(pproba>.5)>1 assert np.sum(pproba<.5)>1 def test_imm_wnc1(): # Test the basic imm_wnc, where the probaility under the null is random n = 50 dim = 1 alpha = .5 g0 = 1. x = np.random.rand(n, dim) x[:int(.3 * n)] *= .2 x[:int(.1 * n)] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.random.rand(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) g = np.reshape(np.linspace(0, 1, 101), (101, dim)) #sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300, sampling_points=g) # the density should sum to 1 ds = 0.01*like.sum() assert ds<1 assert ds>.8 assert np.sum(pproba>.5)>1 assert np.sum(pproba<.5)>1 def test_imm_wnc2(): # Test the basic imm_wnc when null class is shrunk to 0 n = 50 dim = 1 alpha = .5 g0 = 1. x = np.random.rand(n, dim) x[:int(.3 * n)] *= .2 x[:int(.1 * n)] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.zeros(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300) assert like.min()>.1 assert like.max()<5. assert_array_equal(pproba, ncp) def test_imm_wnc3(): # Test the basic imm_wnc when null class is of prob 1 (nothing is estimated) n = 50 dim = 1 alpha = .5 g0 = 1. 
x = np.random.rand(n, dim) x[:int(.3 * n)] *= .2 x[:int(.1 * n)] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.ones(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300) assert_array_equal(pproba, ncp) nipy-0.6.1/nipy/algorithms/clustering/tests/test_vmm.py000066400000000000000000000047051470056100100233560ustar00rootroot00000000000000""" Test the Von-Mises-Fisher mixture model Author : Bertrand Thirion, 2010 """ from unittest import skipIf import numpy as np from nibabel.optpkg import optional_package from ..von_mises_fisher_mixture import ( VonMisesMixture, select_vmm, select_vmm_cv, sphere_density, ) matplotlib, HAVE_MPL, _ = optional_package('matplotlib') needs_mpl = skipIf(not HAVE_MPL, "Test needs matplotlib") def test_spherical_area(): # test the co_labelling functionality points, area = sphere_density(100) assert np.abs(area.sum()-4*np.pi)<1.e-2 def test_von_mises_fisher_density(): # test that a density is indeed computed on the unit sphere for a # one-component and three-component model (k == 1, 3) x = np.random.randn(100, 3) x = (x.T/np.sqrt(np.sum(x**2, 1))).T s, area = sphere_density(100) for k in (1, 3): for precision in [.1, 1., 10., 100.]: for null_class in (False, True): vmd = VonMisesMixture(k, precision, null_class=null_class) vmd.estimate(x) # check that it sums to 1 assert (np.abs((vmd.mixture_density(s)*area).sum() - 1) < 1e-2) @needs_mpl def test_von_mises_fisher_show(): # Smoke test for VonMisesMixture.show x = np.random.randn(100, 3) x = (x.T/np.sqrt(np.sum(x**2, 1))).T vmd = VonMisesMixture(1, 1) # Need to estimate to avoid error in show vmd.estimate(x) # Check that show does not raise an error vmd.show(x) def test_dimension_selection_bic(): # Tests whether dimension selection yields correct results x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(200, 3) * .1 x[:40] += x1 x[40:150] += x2 x[150:] += x3 x = (x.T / np.sqrt(np.sum(x**2, 1))).T precision = 100. my_vmm = select_vmm(list(range(1,8)), precision, False, x) assert my_vmm.k == 3 def test_dimension_selection_cv(): # Tests the dimension selection using cross validation x1 = [1, 0, 0] x2 = [-1, 0, 0] x = np.random.randn(20, 3)*.1 x[0::2] += x1 x[1::2] += x2 x = (x.T / np.sqrt(np.sum(x**2,1))).T precision = 50. sub = np.repeat(np.arange(10), 2) my_vmm = select_vmm_cv(list(range(1,8)), precision, x, cv_index=sub, null_class=False, ninit=5) z = np.argmax(my_vmm.responsibilities(x), 1) assert len(np.unique(z))>1 assert len(np.unique(z))<4 nipy-0.6.1/nipy/algorithms/clustering/utils.py000066400000000000000000000152551470056100100215200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: #from _clustering import * #from _clustering import __doc__ import numpy as np def kmeans(X, nbclusters=2, Labels=None, maxiter=300, delta=0.0001, verbose=0, ninit=1): """ kmeans clustering algorithm Parameters ---------- X: array of shape (n,p): n = number of items, p = dimension data array nbclusters (int), the number of desired clusters Labels = None array of shape (n) prior Labels. if None or inadequate a random initialization is performed. maxiter=300 (int), the maximum number of iterations before convergence delta: float, optional, the relative increment in the results before declaring convergence. 
verbose: verbosity mode, optional ninit: int, optional, number of random initializations Returns ------- Centers: array of shape (nbclusters, p), the centroids of the resulting clusters Labels : array of size n, the discrete labels of the input items J (float): the final value of the inertia criterion """ nbitems = X.shape[0] if nbitems < 1: if verbose: raise ValueError(" I need at least one item to cluster") if np.size(X.shape) > 2: if verbose: raise ValueError("Please enter a two-dimensional array \ for clustering") if np.size(X.shape) == 1: X = np.reshape(X, (nbitems, 1)) X = X.astype('d') nbclusters = int(nbclusters) if nbclusters < 1: if verbose: print(" cannot compute less than 1 cluster") nbclusters = 1 if nbclusters > nbitems: if verbose: print(" cannot find more clusters than items") nbclusters = nbitems if (ninit < 1) & verbose: print("making at least one iteration") ninit = np.maximum(int(ninit), 1) if Labels is not None: if np.size(Labels) == nbitems: Labels = Labels.astype(np.int_) OK = (Labels.min() > -1) & (Labels.max() < nbclusters + 1) if OK: maxiter = int(maxiter) if maxiter > 0: delta = float(delta) if delta < 0: if verbose: print("incorrect stopping criterion - ignored") delta = 0.0001 else: pass else: if verbose: print("incorrect number of iterations - ignored") maxiter = 300 else: if verbose: print("incorrect labelling - ignored") else: if verbose: print("incompatible number of labels provided - ignored") Centers, labels, J = _kmeans(X, nbclusters, Labels, maxiter, delta, ninit) return Centers, labels, J def _MStep(x, z, k): """Computation of cluster centers/means Parameters ---------- x array of shape (n,p) where n = number of samples, p = data dimension z, array of shape (x.shape[0]) current assignment k, int, number of desired clusters Returns ------- centers, array of shape (k,p) the resulting centers """ dim = x.shape[1] centers = np.repeat(np.reshape(x.mean(0), (1, dim)), k, 0) for q in range(k): if np.sum(z == q) == 0: pass else: centers[q] = np.mean(x[z == q], 0) return centers def _EStep(x, centers): """ Computation of the input-to-cluster assignment Parameters ---------- x array of shape (n,p) n = number of items, p = data dimension centers, array of shape (k,p) the cluster centers Returns ------- z vector of shape(n), the resulting assignment """ nbitem = x.shape[0] z = - np.ones(nbitem).astype(np.int_) mindist = np.inf * np.ones(nbitem) k = centers.shape[0] for q in range(k): dist = np.sum((x - centers[q]) ** 2, 1) z[dist < mindist] = q mindist = np.minimum(dist, mindist) J = mindist.sum() return z, J def voronoi(x, centers): """ Assignment of data items to nearest cluster center Parameters ---------- x array of shape (n,p) n = number of items, p = data dimension centers, array of shape (k, p) the cluster centers Returns ------- z vector of shape(n), the resulting assignment """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) if np.size(centers) == centers.shape[0]: centers = np.reshape(centers, (np.size(centers), 1)) if x.shape[1] != centers.shape[1]: raise ValueError("Inconsistent dimensions for x and centers") return _EStep(x, centers)[0] def _kmeans(X, nbclusters=2, Labels=None, maxiter=300, delta=1.e-4, ninit=1, verbose=0): """ kmeans clustering algorithm Parameters ---------- X: array of shape (n,p): n = number of items, p = dimension data array nbclusters (int), the number of desired clusters Labels: array of shape (n) prior Labels, optional if None or inadequate a random initialization is performed. 
maxiter: int, optional the maximum number of iterations before convergence delta: float, optional the relative increment in the results before declaring convergence. verbose=0: verbosity mode Returns ------- Centers: array of shape (nbclusters, p), the centroids of the resulting clusters Labels: array of size n, the discrete labels of the input items J, float, the final value of the inertia criterion """ # fixme: do the checks nbitem = X.shape[0] vdata = np.mean(np.var(X, 0)) bJ = np.inf for it in range(ninit): # init if Labels is None: seeds = np.argsort(np.random.rand(nbitem))[:nbclusters] centers = X[seeds] else: centers = _MStep(X, Labels, nbclusters) centers_old = centers.copy() # iterations for i in range(maxiter): z, J = _EStep(X, centers) centers = _MStep(X, z, nbclusters) if verbose: print(i, J) if np.sum((centers_old - centers) ** 2) < delta * vdata: if verbose: print(i) break centers_old = centers.copy() if J < bJ: bJ = J centers_output = centers.copy() z_output = z.copy() else: centers_output = centers z_output = z return centers_output, z_output, bJ nipy-0.6.1/nipy/algorithms/clustering/von_mises_fisher_mixture.py000066400000000000000000000323521470056100100254740ustar00rootroot00000000000000""" Implementation of Von-Mises-Fisher Mixture models, i.e. the equivalent of mixture of Gaussian on the sphere. Author: Bertrand Thirion, 2010-2011 """ from warnings import warn import numpy as np warn('Module nipy.algorithms.clustering.von_mises_fisher_mixture deprecated, ' 'will be removed', FutureWarning, stacklevel=2) class VonMisesMixture: """ Model for Von Mises mixture distribution with fixed variance on a two-dimensional sphere """ def __init__(self, k, precision, means=None, weights=None, null_class=False): """ Initialize Von Mises mixture Parameters ---------- k: int, number of components precision: float, the fixed precision parameter means: array of shape(self.k, 3), optional input component centers weights: array of shape(self.k), optional input components weights null_class: bool, optional Inclusion of a null class within the model (related to k=0) fixme ----- consistency checks """ self.k = k self.dim = 2 self.em_dim = 3 self.means = means self.precision = precision self.weights = weights self.null_class = null_class def log_density_per_component(self, x): """Compute the per-component log-density of the data Parameters ---------- x: array of shape(n,3) should be on the unit sphere Returns ------- loglike: array of shape(n, self.k) the log-density per component """ n = x.shape[0] constant = self.precision / (2 * np.pi * (1 - np.exp( - \ 2 * self.precision))) loglike = np.log(constant) + \ (np.dot(x, self.means.T) - 1) * self.precision if self.null_class: loglike = np.hstack((np.log(1.
/ (4 * np.pi)) * np.ones((n, 1)), loglike)) return loglike def density_per_component(self, x): """ Compute the per-component density of the data Parameters ---------- x: array of shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n, self.k), with non-negative values the density """ return np.exp(self.log_density_per_component(x)) def weighted_density(self, x): """ Return weighted density Parameters ---------- x: array shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n, self.k) """ return(self.density_per_component(x) * self.weights) def log_weighted_density(self, x): """ Return log weighted density Parameters ---------- x: array of shape(n,3) should be on the unit sphere Returns ------- log_like: array of shape(n, self.k) """ return(self.log_density_per_component(x) + np.log(self.weights)) def mixture_density(self, x): """ Return mixture density Parameters ---------- x: array of shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n) """ wl = self.weighted_density(x) return np.sum(wl, 1) def responsibilities(self, x): """ Return responsibilities Parameters ---------- x: array of shape(n,3) should be on the unit sphere Returns ------- resp: array of shape(n, self.k) """ lwl = self.log_weighted_density(x) wl = np.exp(lwl.T - lwl.mean(1)).T swl = np.sum(wl, 1) resp = (wl.T / swl).T return resp def estimate_weights(self, z): """ Calculate and set weights from `z` Parameters ---------- z: array of shape(self.k) """ self.weights = np.sum(z, 0) / z.sum() def estimate_means(self, x, z): """ Calculate and set means from `x` and `z` Parameters ---------- x: array of shape(n,3) should be on the unit sphere z: array of shape(self.k) """ m = np.dot(z.T, x) self.means = (m.T / np.sqrt(np.sum(m ** 2, 1))).T def estimate(self, x, maxiter=100, miniter=1, bias=None): """ Return average log density across samples Parameters ---------- x: array of shape (n,3) should be on the unit sphere maxiter : int, optional maximum number of iterations of the algorithms miniter : int, optional minimum number of iterations bias : array of shape(n), optional prior probability of being in a non-null class Returns ------- ll : float average (across samples) log-density """ # initialization with random positions and constant weights if self.weights is None: self.weights = np.ones(self.k) / self.k if self.null_class: self.weights = np.ones(self.k + 1) / (self.k + 1) if self.means is None: aux = np.arange(x.shape[0]) np.random.shuffle(aux) self.means = x[aux[:self.k]] # EM algorithm assert not(np.isnan(self.means).any()) pll = - np.inf for i in range(maxiter): ll = np.log(self.mixture_density(x)).mean() z = self.responsibilities(x) assert not(np.isnan(z).any()) # bias z if bias is not None: z[:, 0] *= (1 - bias) z[:, 1:] = ((z[:, 1:].T) * bias).T z = (z.T / np.sum(z, 1)).T self.estimate_weights(z) if self.null_class: self.estimate_means(x, z[:, 1:]) else: self.estimate_means(x, z) assert not(np.isnan(self.means).any()) if (i > miniter) and (ll < pll + 1.e-6): break pll = ll return ll def show(self, x): """ Visualization utility Parameters ---------- x: array of shape (n, 3) should be on the unit sphere Notes ----- Uses ``matplotlib``. 
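Examples
--------
A minimal sketch (requires a working matplotlib backend; the random
data here are purely illustrative)::

    import numpy as np
    x = np.random.randn(100, 3)
    x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T  # project onto the sphere
    vmd = VonMisesMixture(k=1, precision=1.)
    vmd.estimate(x)  # fit first: show() needs estimated means
    vmd.show(x)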
""" # label the data z = np.argmax(self.responsibilities(x), 1) import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 fig = plt.figure() ax = p3.Axes3D(fig) colors = (['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] * \ (1 + (1 + self.k) // 8))[:self.k + 1] if (self.null_class) and (z == 0).any(): ax.plot3D(x[z == 0, 0], x[z == 0, 1], x[z == 0, 2], '.', color=colors[0]) for k in range(self.k): if self.null_class: if np.sum(z == (k + 1)) == 0: continue uk = z == (k + 1) ax.plot3D(x[uk, 0], x[uk, 1], x[uk, 2], '.', color=colors[k + 1]) ax.plot3D([self.means[k, 0]], [self.means[k, 1]], [self.means[k, 2]], 'o', color=colors[k + 1]) else: if np.sum(z == k) == 0: continue ax.plot3D(x[z == k, 0], x[z == k, 1], x[z == k, 2], '.', color=colors[k]) ax.plot3D([self.means[k, 0]], [self.means[k, 1]], [self.means[k, 2]], 'o', color=colors[k]) plt.show() def estimate_robust_vmm(k, precision, null_class, x, ninit=10, bias=None, maxiter=100): """ Return the best von_mises mixture after severla initialization Parameters ---------- k: int, number of classes precision: float, priori precision parameter null class: bool, optional, should a null class be included or not x: array of shape(n,3) input data, should be on the unit sphere ninit: int, optional, number of iterations bias: array of shape(n), optional prior probability of being in a non-null class maxiter: int, optional, maximum number of iterations after each initialization """ score = - np.inf for i in range(ninit): aux = VonMisesMixture(k, precision, null_class=null_class) ll = aux.estimate(x, bias=bias) if ll > score: best_model = aux score = ll return best_model def select_vmm(krange, precision, null_class, x, ninit=10, bias=None, maxiter=100, verbose=0): """Return the best von_mises mixture after severla initialization Parameters ---------- krange: list of ints, number of classes to consider precision: null class: x: array of shape(n,3) should be on the unit sphere ninit: int, optional, number of iterations maxiter: int, optional, bias: array of shape(n), a prior probability of not being in the null class verbose: Bool, optional """ score = - np.inf for k in krange: aux = estimate_robust_vmm(k, precision, null_class, x, ninit, bias, maxiter) ll = aux.estimate(x) if null_class: bic = ll - np.log(x.shape[0]) * k * 3 / x.shape[0] else: bic = ll - np.log(x.shape[0]) * (k * 3 - 1) / x.shape[0] if verbose: print(k, bic) if bic > score: best_model = aux score = bic return best_model def select_vmm_cv(krange, precision, x, null_class, cv_index, ninit=5, maxiter=100, bias=None, verbose=0): """Return the best von_mises mixture after severla initialization Parameters ---------- krange: list of ints, number of classes to consider precision: float, precision parameter of the von-mises densities x: array of shape(n, 3) should be on the unit sphere null class: bool, whether a null class should be included or not cv_index: set of indices for cross validation ninit: int, optional, number of iterations maxiter: int, optional, bias: array of shape (n), prior """ score = - np.inf mll = [] for k in krange: mll.append( - np.inf) for j in range(1): ll = np.zeros_like(cv_index).astype(np.float64) for i in np.unique(cv_index): xl = x[cv_index != i] xt = x[cv_index == i] bias_l = None if bias is not None: bias_l = bias[cv_index != i] aux = estimate_robust_vmm(k, precision, null_class, xl, ninit=ninit, bias=bias_l, maxiter=maxiter) if bias is None: ll[cv_index == i] = np.log(aux.mixture_density(xt)) else: bias_t = bias[cv_index == i] lwd = aux.weighted_density(xt) 
ll[cv_index == i] = np.log(lwd[:, 0] * (1 - bias_t) + \ lwd[:, 1:].sum(1) * bias_t) if ll.mean() > mll[-1]: mll[-1] = ll.mean() aux = estimate_robust_vmm(k, precision, null_class, x, ninit, bias=bias, maxiter=maxiter) if verbose: print(k, mll[ - 1]) if mll[ - 1] > score: best_model = aux score = mll[ - 1] return best_model def sphere_density(npoints): """Return the points and area of a npoints**2 points sampled on a sphere Returns ------- s : array of shape(npoints ** 2, 3) area: array of shape(npoints) """ u = np.linspace(0, 2 * np.pi, npoints + 1)[:npoints] v = np.linspace(0, np.pi, npoints + 1)[:npoints] s = np.vstack((np.ravel(np.outer(np.cos(u), np.sin(v))), np.ravel(np.outer(np.sin(u), np.sin(v))), np.ravel(np.outer(np.ones(np.size(u)), np.cos(v))))).T area = np.abs(np.ravel(np.outer(np.ones(np.size(u)), np.sin(v)))) * \ np.pi ** 2 * 2 * 1. / (npoints ** 2) return s, area def example_noisy(): x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(200, 3) * .1 x[:30] += x1 x[40:150] += x2 x[150:] += x3 x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T precision = 100. vmm = select_vmm(list(range(2, 7)), precision, True, x) vmm.show(x) # check that it sums to 1 s, area = sphere_density(100) print((vmm.mixture_density(s) * area).sum()) def example_cv_nonoise(): x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(30, 3) * .1 x[0::3] += x1 x[1::3] += x2 x[2::3] += x3 x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T precision = 50. sub = np.repeat(np.arange(10), 3) vmm = select_vmm_cv(list(range(1, 8)), precision, x, cv_index=sub, null_class=False, ninit=20) vmm.show(x) # check that it sums to 1 s, area = sphere_density(100) return vmm nipy-0.6.1/nipy/algorithms/diagnostics/000077500000000000000000000000001470056100100201265ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/diagnostics/__init__.py000066400000000000000000000004601470056100100222370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # Initialization for diagnostics package from ..utils import pca from .screens import screen from .timediff import time_slice_diffs from .tsdiffplot import plot_tsdiffs, plot_tsdiffs_image nipy-0.6.1/nipy/algorithms/diagnostics/commands.py000066400000000000000000000150731470056100100223070ustar00rootroot00000000000000""" Implementation of diagnostic command line tools Tools are: * nipy_diagnose * nipy_tsdiffana This module has the logic for each command. The command script files deal with argument parsing and any custom imports. The implementation here accepts the ``args`` object from ``argparse`` and does the work. """ from os.path import join as pjoin from os.path import split as psplit import numpy as np from nibabel import AnalyzeHeader from nibabel.filename_parser import splitext_addext import nipy from .screens import screen, write_screen_res from .timediff import time_slice_diffs_image from .tsdiffplot import plot_tsdiffs def parse_fname_axes(img_fname, time_axis, slice_axis): """ Load `img_fname`, check `time_axis`, `slice_axis` or use default Parameters ---------- img_fname : str filename of image on which to do diagnostics time_axis : None or str or int, optional Axis indexing time-points. None is default, will be replaced by a value of 't'. If `time_axis` is an integer, gives the index of the input (domain) axis of `img`. 
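A string that parses as an integer (e.g. ``'3'``, as it arrives from the command line) is first converted and then treated as such an index.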
If `time_axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. slice_axis : None or str or int, optional Axis indexing MRI slices. If `slice_axis` is an integer, gives the index of the input (domain) axis of `img`. If `slice_axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. If None (the default) then 1) try the name 'slice' to select the axis - if this fails, and `fname` refers to an Analyze type image (such as Nifti), then 2) default to the third image axis, otherwise 3) raise a ValueError Returns ------- img : ``Image`` instance Image as loaded from `img_fname` time_axis : int or str Time axis, possibly filled with default slice_axis : int or str Slice axis, possibly filled with default """ # Check whether this is an Analyze-type image img = nipy.load_image(img_fname) # Check for axes if time_axis is not None: # Try converting to an integer in case that was what was passed try: time_axis = int(time_axis) except ValueError: # Maybe a string pass else: # was None time_axis = 't' if slice_axis is not None: # Try converting to an integer in case that was what was passed try: slice_axis = int(slice_axis) except ValueError: # Maybe a string pass else: # slice axis was None - search for default input_names = img.coordmap.function_domain.coord_names is_analyze = ('header' in img.metadata and isinstance(img.metadata['header'], AnalyzeHeader)) if 'slice' in input_names: slice_axis = 'slice' elif is_analyze and img.ndim == 4: slice_axis = 2 else: raise ValueError('No slice axis specified, not analyze type ' 'image; refusing to guess') return img, time_axis, slice_axis def tsdiffana(args): """ Generate tsdiffana plots from command line params `args` Parameters ---------- args : object object with attributes * filename : str - 4D image filename * out_file : str - graphics file to write to instead of leaving graphics on screen * time_axis : str - name or number of time axis in `filename` * slice_axis : str - name or number of slice axis in `filename` * write_results : bool - if True, write images and plots to files * out_path : None or str - path to which to write results * out_fname_label : None or filename - suffix of output results files Returns ------- axes : Matplotlib axes Axes on which we have done the plots. 
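Examples
--------
`args` normally comes from ``argparse``, but any namespace with the
right attributes will do; a sketch, with a hypothetical image name::

    from argparse import Namespace
    args = Namespace(filename='myfunc.nii', time_axis='t',
                     slice_axis='k', out_file='tsdiff.png',
                     write_results=False, out_path=None,
                     out_fname_label=None)
    axes = tsdiffana(args)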
""" if args.out_file is not None and args.write_results: raise ValueError("Cannot have OUT_FILE and WRITE_RESULTS options " "together") img, time_axis, slice_axis = parse_fname_axes(args.filename, args.time_axis, args.slice_axis) results = time_slice_diffs_image(img, time_axis, slice_axis) axes = plot_tsdiffs(results) if args.out_file is None and not args.write_results: # interactive mode return axes if args.out_file is not None: # plot only mode axes[0].figure.savefig(args.out_file) return axes # plot and images mode froot, ext, addext = splitext_addext(args.filename) fpath, fbase = psplit(froot) fpath = fpath if args.out_path is None else args.out_path fbase = fbase if args.out_fname_label is None else args.out_fname_label axes[0].figure.savefig(pjoin(fpath, 'tsdiff_' + fbase + '.png')) # Save image volumes for key, prefix in (('slice_diff2_max_vol', 'dv2_max_'), ('diff2_mean_vol', 'dv2_mean_')): fname = pjoin(fpath, prefix + fbase + ext + addext) nipy.save_image(results[key], fname) # Save time courses into npz np.savez(pjoin(fpath, 'tsdiff_' + fbase + '.npz'), volume_means=results['volume_means'], slice_mean_diff2=results['slice_mean_diff2'], ) return axes def diagnose(args): """ Calculate, write results from diagnostic screen Parameters ---------- args : object object with attributes: * filename : str - 4D image filename * time_axis : str - name or number of time axis in `filename` * slice_axis : str - name or number of slice axis in `filename` * out_path : None or str - path to which to write results * out_fname_label : None or filename - suffix of output results files * ncomponents : int - number of PCA components to write images for Returns ------- res : dict Results of running :func:`screen` on `filename` """ img, time_axis, slice_axis = parse_fname_axes(args.filename, args.time_axis, args.slice_axis) res = screen(img, args.ncomponents, time_axis, slice_axis) froot, ext, addext = splitext_addext(args.filename) fpath, fbase = psplit(froot) fpath = fpath if args.out_path is None else args.out_path fbase = fbase if args.out_fname_label is None else args.out_fname_label write_screen_res(res, fpath, fbase, ext + addext) return res nipy-0.6.1/nipy/algorithms/diagnostics/screens.py000066400000000000000000000136701470056100100221510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Diagnostic 4d image screen ''' import warnings from os.path import join as pjoin import numpy as np from ...core.api import Image, drop_io_dim from ...core.reference.coordinate_map import AxisError, input_axis_index from ...io.api import save_image from ..utils import pca from .timediff import time_slice_diffs from .tsdiffplot import plot_tsdiffs def screen(img4d, ncomp=10, time_axis='t', slice_axis=None): ''' Diagnostic screen for 4d FMRI image Includes PCA, tsdiffana and mean, std, min, max images. Parameters ---------- img4d : ``Image`` 4d image file ncomp : int, optional number of component images to return. Default is 10 time_axis : str or int, optional Axis over which to do PCA, time difference analysis. Defaults to `t` slice_axis : None or str or int, optional Name or index of input axis over which to do slice analysis for time difference analysis. If None, look for input axis ``slice``. At the moment we then assume slice is the last non-time axis, but this last guess we will remove in future versions of nipy. The default will then be 'slice' and you'll get an error if there is no axis named 'slice'. 
Returns ------- screen : dict with keys: * mean : mean image (all summaries are over last dimension) * std : standard deviation image * max : image of max * min : min * pca : 4D image of PCA component images * pca_res : dict of results from PCA * ts_res : dict of results from tsdiffana Examples -------- >>> import nipy as ni >>> from nipy.testing import funcfile >>> img = ni.load_image(funcfile) >>> screen_res = screen(img) >>> screen_res['mean'].ndim 3 >>> screen_res['pca'].ndim 4 ''' if img4d.ndim != 4: raise ValueError('Expecting a 4d image') data = img4d.get_fdata() cmap = img4d.coordmap # Get numerical index for time axis in data array time_axis = input_axis_index(cmap, time_axis) # Get numerical index for slice axis in data array if slice_axis is None: try: slice_axis = input_axis_index(cmap, 'slice') except AxisError: warnings.warn( 'Future versions of nipy will not guess the slice axis ' 'from position, but only from axis name == "slice"; ' 'Please specify the slice axis by name or index to avoid ' 'this warning', FutureWarning, stacklevel=2) slice_axis = 2 if time_axis == 3 else 3 else: slice_axis = input_axis_index(cmap, slice_axis) # 3D coordinate map for summary images cmap_3d = drop_io_dim(cmap, 't') screen_res = {} # standard processed images screen_res['mean'] = Image(np.mean(data, axis=time_axis), cmap_3d) screen_res['std'] = Image(np.std(data, axis=time_axis), cmap_3d) screen_res['max'] = Image(np.max(data, axis=time_axis), cmap_3d) screen_res['min'] = Image(np.min(data, axis=time_axis), cmap_3d) # PCA screen_res['pca_res'] = pca.pca_image(img4d, axis=time_axis, standardize=False, ncomp=ncomp) screen_res['pca'] = screen_res['pca_res']['basis_projections'] # tsdiffana screen_res['ts_res'] = time_slice_diffs(data, time_axis=time_axis, slice_axis=slice_axis) return screen_res def write_screen_res(res, out_path, out_root, out_img_ext='.nii', pcnt_var_thresh=0.1): ''' Write results from ``screen`` to disk as images Parameters ---------- res : dict output from ``screen`` function out_path : str directory to which to write output images out_root : str part of filename between image-specific prefix and image-specific extension to use for writing images out_img_ext : str, optional extension (identifying image type) to which to write volume images. Default is '.nii' pcnt_var_thresh : float, optional threshold below which we do not plot percent variance explained by components; default is 0.1. This removes the long tail from percent variance plots. 
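For example, at the default threshold of 0.1, components explaining less than 0.1 percent of the variance are left out of the percent-variance plot.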
Returns ------- None ''' import matplotlib.pyplot as plt # save volume images for key in ('mean', 'min', 'max', 'std', 'pca'): fname = pjoin(out_path, f'{key}_{out_root}{out_img_ext}') save_image(res[key], fname) # plot, save component time courses and some tsdiffana stuff pca_axis = res['pca_res']['axis'] n_comp = res['pca_res']['basis_projections'].shape[pca_axis] vectors = res['pca_res']['basis_vectors'] pcnt_var = res['pca_res']['pcnt_var'] np.savez(pjoin(out_path, f'vectors_components_{out_root}.npz'), basis_vectors=vectors, pcnt_var=pcnt_var, volume_means=res['ts_res']['volume_means'], slice_mean_diff2=res['ts_res']['slice_mean_diff2'], ) plt.figure() for c in range(n_comp): plt.subplot(n_comp, 1, c+1) plt.plot(vectors[:,c]) plt.axis('tight') plt.suptitle(out_root + ': PCA basis vectors') plt.savefig(pjoin(out_path, f'components_{out_root}.png')) # plot percent variance plt.figure() plt.plot(pcnt_var[pcnt_var >= pcnt_var_thresh]) plt.axis('tight') plt.suptitle(out_root + ': PCA percent variance') plt.savefig(pjoin(out_path, f'pcnt_var_{out_root}.png')) # plot tsdiffana plt.figure() axes = [plt.subplot(4, 1, i+1) for i in range(4)] plot_tsdiffs(res['ts_res'], axes) plt.suptitle(out_root + ': tsdiffana') plt.savefig(pjoin(out_path, f'tsdiff_{out_root}.png')) nipy-0.6.1/nipy/algorithms/diagnostics/tests/000077500000000000000000000000001470056100100212705ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/diagnostics/tests/__init__.py000066400000000000000000000000521470056100100233760ustar00rootroot00000000000000# Making diagnostics tests into a package nipy-0.6.1/nipy/algorithms/diagnostics/tests/data/000077500000000000000000000000001470056100100222015ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/diagnostics/tests/data/generate_tsdiff_results.m000066400000000000000000000007601470056100100272740ustar00rootroot00000000000000% matlab script to regenerate tsdiff results % % First copy nipy.testing.functional.nii.gz to current working directory % % gunzip functional.nii.gz % % Make sure ``timediff.m`` in this directory is on your matlab path, as % is SPM >= version 5 P = spm_select('ExtList', pwd, '^functional\.nii', 1:20); [imgdiff g slicediff] = timediff(P); diff2_mean_vol = spm_read_vols(spm_vol('vscmeanfunctional.nii')); slice_diff2_max_vol = spm_read_vols(spm_vol('vsmaxfunctional.nii')); save tsdiff_results nipy-0.6.1/nipy/algorithms/diagnostics/tests/data/timediff.m000066400000000000000000000072461470056100100241570ustar00rootroot00000000000000function [imdiff, g, slicediff] = timediff(imgs, flags) % Analyses slice by slice variance across time series % FORMAT [imdiff, g, slicediff] = timediff(imgs, flags) % % imgs - string or cell or spm_vol list of images % flags - specify options; if contains: % m - create mean var image (vmean*), max slice var image % (vsmax*) and scan to scan variance image (vscmean*) % v - create variance image for between each time point % % imdiff - mean variance between each image in time series % g - mean voxel signal intensity for each image % slicediff - slice by slice variance between each image % % Matthew Brett 17/7/00 [imdiff, g, slicediff] = deal([]); if nargin < 1 imgs = []; end if isempty(imgs) imgs = cbu_get_imgs(Inf, 'Select time series images'); end if isempty(imgs), return, end if iscell(imgs) imgs = char(imgs); end if ischar(imgs) imgs = spm_vol(imgs); end if nargin < 2 flags = 'm'; end nimgs = size(imgs,1); if isempty(nimgs) | nimgs < 2 return end V1 = imgs(1); Vr = imgs(2:end); ndimgs = nimgs-1; Hold = 0; if any(flags == 'v') % 
create variance images for i = 1:ndimgs vVr(i) = makevol(Vr(i),'v',16); % float end end if any(flags == 'm') % mean /max variance mVr = makevol(V1,'vmean',16); sVr = makevol(V1,'vscmean',16); xVr = makevol(V1,'vsmax',16); end [xydim zno] = deal(V1.dim(1:2),V1.dim(3)); p1 = spm_read_vols(V1); slicediff = zeros(ndimgs,zno); g = zeros(ndimgs,1); for z = 1:zno % across slices M = spm_matrix([0 0 z]); pr = p1(:,:,z); % this slice from first volume if any(flags == 'm') [mv sx2 sx mxvs] = deal(zeros(size(pr))); end % SVD is squared voxel difference (usually a slice of same) % MSVD is the mean of this measure across voxels (one value) % DTP is a difference time point (1:T-1) cmax = 0; % counter for which slice has the largest MSVD % note that Vr contains volumes 2:T (not the first) for i = 1:ndimgs % across DTPs c = spm_slice_vol(Vr(i),M,xydim,Hold); % get slice from this time point v = (c - pr).^2; % SVD from this slice to last slicediff(i,z) = mean(v(:)); % MSVD for this slice g(i) = g(i) + mean(c(:)); % simple mean of data if slicediff(i,z)>cmax % if this slice has larger MSVD, keep mxvs = v; cmax = slicediff(i,z); end pr = c; % set current slice data as previous, for next iteration of loop if any(flags == 'v') % write individual SVD slice for DTP vVr(i) = spm_write_plane(vVr(i),v,z); end if any(flags == 'm') mv = mv + v; % sum up SVDs for mean SVD (across time points) sx = sx + c; % sum up data for simple variance calculation sx2 = sx2 + c.^2; % sum up squared data for simple variance % calculation end end if any(flags == 'm') % mean variance etc sVr = spm_write_plane(sVr,mv/(ndimgs),z); % write mean of SVDs % across time xVr = spm_write_plane(xVr,mxvs,z); % write maximum SVD mVr = spm_write_plane(mVr,(sx2-((sx.^2)/ndimgs))./(ndimgs-1),z); % (above) this is the one-pass simple variance formula end end if any(findstr(spm('ver'), '99')) spm_close_vol([vVr sVr xVr mVr]); end g = [mean(p1(:)); g/zno]; imdiff = mean(slicediff')'; return function Vo = makevol(Vi, prefix, datatype) Vo = Vi; fn = Vi.fname; [p f e] = fileparts(fn); Vo.fname = fullfile(p, [prefix f e]); switch spm('ver') case {'SPM5','SPM8','SPM8b'} Vo.dt = [datatype 0]; Vo = spm_create_vol(Vo, 'noopen'); case 'SPM2' Vo.dim(4) = datatype; Vo = spm_create_vol(Vo, 'noopen'); case 'SPM99' Vo.dim(4) = datatype; Vo = spm_create_image(Vo); otherwise error(sprintf('What ees thees version "%s"', spm('ver'))); end return nipy-0.6.1/nipy/algorithms/diagnostics/tests/data/tsdiff_results.mat000066400000000000000000000353571470056100100257550ustar00rootroot00000000000000[binary MATLAB 5.0 MAT-file (Platform: GLNX86, created Wed Dec 16 13:02:32 2009) holding the regression arrays g, imgdiff, slicediff, diff2_mean_vol and slice_diff2_max_vol used by the time-difference tests; raw bytes omitted]
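The stored arrays can be read back with ``scipy.io``, as the time-difference tests do; a sketch, assuming the file path is resolved relative to this data directory:

    import scipy.io as sio
    # squeeze_me collapses singleton MATLAB dimensions
    tsd_results = sio.loadmat('tsdiff_results.mat',
                              struct_as_record=True, squeeze_me=True)
    tsd_results['g']  # mean voxel signal intensity for each image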
nipy-0.6.1/nipy/algorithms/diagnostics/tests/test_commands.py000066400000000000000000000227321470056100100245100ustar00rootroot00000000000000""" Testing diagnostics.command module """ import os import shutil from os.path import dirname, isfile from os.path import join as pjoin import nibabel as nib import numpy as np import pytest from nibabel import AnalyzeImage, Nifti1Image, Nifti1Pair, Spm2AnalyzeImage from numpy.testing import assert_array_equal from nipy import load_image from nipy.io.nibcompat import get_header from nipy.io.nifti_ref import NiftiError from nipy.testing import funcfile from nipy.testing.decorators import needs_mpl_agg from ..commands import diagnose, parse_fname_axes, tsdiffana from ..timediff import time_slice_diffs_image def test_parse_fname_axes(in_tmp_path): # Test logic for setting time and slice axis defaults # We need real images for the tests because nipy will load them # For simplicity, we can create them shape = (4, 5, 6, 20) arr = np.arange(np.prod(shape), dtype=float).reshape(shape) zooms = (2., 3., 4., 2.1) for (img_class, ext) in ((AnalyzeImage, '.img'), (Spm2AnalyzeImage, '.img'), (Nifti1Pair, '.img'), (Nifti1Image, '.nii')): hdr = img_class.header_class() hdr.set_data_shape(shape) hdr.set_zooms(zooms) hdr.set_data_dtype(np.dtype(np.float64)) nibabel_img = img_class(arr, None, hdr) # We so far haven't set any slice axis information for z_ext in ('', '.gz'): fname = 'image' + ext + z_ext nib.save(nibabel_img, fname) for in_time, in_sax, out_time, out_sax in ( (None, None, 't', 2), (None, '0', 't', 0), (None, 'i', 't', 'i'), (None, '1', 't', 1), (None, 'j', 't', 'j'), ('k', 'j', 'k', 'j'), ('k', None, 'k', 2)): img, time_axis, slice_axis = parse_fname_axes( fname, in_time, in_sax) assert time_axis == out_time assert slice_axis == out_sax del img # For some images, we can set the slice dimension.
This becomes the # default if input slice_axis is None if hasattr(hdr, 'set_dim_info'): for ax_no in range(3): get_header(nibabel_img).set_dim_info(slice=ax_no) nib.save(nibabel_img, fname) img, time_axis, slice_axis = parse_fname_axes(fname, None, None) assert time_axis == 't' assert slice_axis == 'slice' del img # Images other than 4D don't get the slice axis default for new_arr in (arr[..., 0], arr[..., None]): new_nib = img_class(new_arr, None, hdr) nib.save(new_nib, fname) pytest.raises(ValueError, parse_fname_axes, fname, None, None) # But you can still set slice axis img, time_axis, slice_axis = parse_fname_axes(fname, None, 'j') assert time_axis == 't' assert slice_axis == 'j' # Non-analyze image types don't get the slice default nib_data = pjoin(dirname(nib.__file__), 'tests', 'data') mnc_4d_fname = pjoin(nib_data, 'minc1_4d.mnc') if isfile(mnc_4d_fname): pytest.raises(ValueError, parse_fname_axes, mnc_4d_fname, None, None) # At the moment we can't even load these guys try: img, time_axis, slice_axis = parse_fname_axes( mnc_4d_fname, None, 'j') except ValueError: # failed load pytest.skip('Hoping for a time when we can use MINC') # But you can still set slice axis (if we can load them) assert time_axis == 't' assert slice_axis == 'j' class Args: pass def check_axes(axes, img_shape, time_axis, slice_axis): # Check axes as expected for plot assert len(axes) == 4 # First x axis is time point differences assert_array_equal(axes[0].xaxis.get_data_interval(), [0, img_shape[time_axis]-2]) # Last x axis is over slices assert_array_equal(axes[-1].xaxis.get_data_interval(), [0, img_shape[slice_axis]-1]) @pytest.mark.filterwarnings("ignore:" "Default `strict` currently False:" "FutureWarning") @needs_mpl_agg def test_tsdiffana(in_tmp_path): # Test tsdiffana command args = Args() img = load_image(funcfile) args.filename = funcfile args.time_axis = None args.slice_axis = None args.write_results = False args.out_path = None args.out_fname_label = None args.out_file = 'test.png' check_axes(tsdiffana(args), img.shape, -1, -2) assert isfile('test.png') args.time_axis = 't' check_axes(tsdiffana(args), img.shape, -1, -2) args.time_axis = '3' check_axes(tsdiffana(args), img.shape, -1, -2) args.slice_axis = 'k' check_axes(tsdiffana(args), img.shape, -1, -2) args.slice_axis = '2' check_axes(tsdiffana(args), img.shape, -1, -2) args.time_axis = '0' check_axes(tsdiffana(args), img.shape, 0, -2) args.slice_axis = 't' check_axes(tsdiffana(args), img.shape, 0, -1) # Check absolute path works args.slice_axis = 'j' args.time_axis = 't' args.out_file = in_tmp_path / 'test_again.png' check_axes(tsdiffana(args), img.shape, -1, -3) # Check that --out-images incompatible with --out-file args.write_results=True pytest.raises(ValueError, tsdiffana, args) args.out_file=None # Copy the functional file to a temporary writeable directory os.mkdir('mydata') tmp_funcfile = in_tmp_path / 'mydata' / 'myfunc.nii.gz' shutil.copy(funcfile, tmp_funcfile) args.filename = tmp_funcfile # Check write-results generates expected images check_axes(tsdiffana(args), img.shape, -1, -3) assert isfile(pjoin('mydata', 'tsdiff_myfunc.png')) max_img = load_image(pjoin('mydata', 'dv2_max_myfunc.nii.gz')) assert max_img.shape == img.shape[:-1] mean_img = load_image(pjoin('mydata', 'dv2_max_myfunc.nii.gz')) assert mean_img.shape == img.shape[:-1] exp_results = time_slice_diffs_image(img, 't', 'j') saved_results = np.load(pjoin('mydata', 'tsdiff_myfunc.npz')) for key in ('volume_means', 'slice_mean_diff2'): assert_array_equal(exp_results[key], 
saved_results[key]) # That we can change out-path os.mkdir('myresults') args.out_path = 'myresults' check_axes(tsdiffana(args), img.shape, -1, -3) assert isfile(pjoin('myresults', 'tsdiff_myfunc.png')) max_img = load_image(pjoin('myresults', 'dv2_max_myfunc.nii.gz')) assert max_img.shape == img.shape[:-1] # And out-fname-label args.out_fname_label = 'vr2' check_axes(tsdiffana(args), img.shape, -1, -3) assert isfile(pjoin('myresults', 'tsdiff_vr2.png')) max_img = load_image(pjoin('myresults', 'dv2_max_vr2.nii.gz')) assert max_img.shape == img.shape[:-1] del max_img, mean_img, saved_results def check_diag_results(results, img_shape, time_axis, slice_axis, ncomps, out_path, froot, ext='.nii.gz'): S = img_shape[slice_axis] T = img_shape[time_axis] pca_shape = list(img_shape) pca_shape[time_axis] = ncomps assert results['pca'].shape == tuple(pca_shape) assert (results['pca_res']['basis_projections'].shape == tuple(pca_shape)) # Roll pca axis last to test shape of output image ax_order = list(range(4)) ax_order.remove(time_axis) ax_order.append(time_axis) rolled_shape = tuple(pca_shape[i] for i in ax_order) pca_img = load_image(pjoin(out_path, 'pca_' + froot + ext)) assert pca_img.shape == rolled_shape for prefix in ('mean', 'min', 'max', 'std'): fname = pjoin(out_path, prefix + '_' + froot + ext) img = load_image(fname) assert img.shape == rolled_shape[:-1] vars = np.load(pjoin(out_path, 'vectors_components_' + froot + '.npz')) assert (set(vars) == {'basis_vectors', 'pcnt_var', 'volume_means', 'slice_mean_diff2'}) assert vars['volume_means'].shape == (T,) assert vars['basis_vectors'].shape == (T, T-1) assert vars['slice_mean_diff2'].shape == (T-1, S) @pytest.mark.filterwarnings("ignore:" "Default `strict` currently False:" "FutureWarning") @needs_mpl_agg def test_diagnose(in_tmp_path): args = Args() img = load_image(funcfile) # Copy the functional file to a temporary writeable directory os.mkdir('mydata') tmp_funcfile = in_tmp_path / 'mydata' / 'myfunc.nii.gz' shutil.copy(funcfile, tmp_funcfile) args.filename = tmp_funcfile args.time_axis = None args.slice_axis = None args.out_path = None args.out_fname_label = None args.ncomponents = 10 res = diagnose(args) check_diag_results(res, img.shape, 3, 2, 10, 'mydata', 'myfunc') args.slice_axis = 'j' res = diagnose(args) check_diag_results(res, img.shape, 3, 1, 10, 'mydata', 'myfunc') # Time axis is not going to work because we'd have to use up one of the # needed spatial axes args.time_axis = 'i' pytest.raises(NiftiError, diagnose, args) args.time_axis = 't' # Check that output works os.mkdir('myresults') args.out_path = 'myresults' args.out_fname_label = 'myana' res = diagnose(args) check_diag_results(res, img.shape, 3, 1, 10, 'myresults', 'myana') nipy-0.6.1/nipy/algorithms/diagnostics/tests/test_screen.py000066400000000000000000000142311470056100100241610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing diagnostic screen """ import os from os.path import join as pjoin from warnings import catch_warnings, simplefilter import numpy as np import pytest from numpy.testing import ( assert_almost_equal, assert_array_equal, ) import nipy as ni from nipy.core.api import rollimg from nipy.testing import funcfile from nipy.testing.decorators import needs_mpl_agg from ...utils.pca import pca from ...utils.tests.test_pca import res2pos1 from ..screens import screen, write_screen_res from ..timediff import time_slice_diffs def _check_pca(res, pca_res): # 
Standardize output vector signs screen_pca_res = res2pos1(res['pca_res']) for key in pca_res: assert_almost_equal(pca_res[key], screen_pca_res[key]) def _check_ts(res, data, time_axis, slice_axis): ts_res = time_slice_diffs(data, time_axis, slice_axis) for key in ts_res: assert_array_equal(ts_res[key], res['ts_res'][key]) def test_screen(): img = ni.load_image(funcfile) # rename third axis to slice to match default of screen # This avoids warnings about future change in default; see the tests for # slice axis below img = img.renamed_axes(k='slice') res = screen(img) assert res['mean'].ndim == 3 assert res['pca'].ndim == 4 assert (sorted(res.keys()) == ['max', 'mean', 'min', 'pca', 'pca_res', 'std', 'ts_res']) data = img.get_fdata() # Check summary images assert_array_equal(np.max(data, axis=-1), res['max'].get_fdata()) assert_array_equal(np.mean(data, axis=-1), res['mean'].get_fdata()) assert_array_equal(np.min(data, axis=-1), res['min'].get_fdata()) assert_array_equal(np.std(data, axis=-1), res['std'].get_fdata()) pca_res = pca(data, axis=-1, standardize=False, ncomp=10) # On windows, there seems to be some randomness in the PCA output vector # signs; this routine sets the basis vectors to have first value positive, # and therefore standardizes the signs pca_res = res2pos1(pca_res) _check_pca(res, pca_res) _check_ts(res, data, 3, 2) # Test that screens accepts and uses time axis data_mean = data.mean(axis=-1) res = screen(img, time_axis='t') assert_array_equal(data_mean, res['mean'].get_fdata()) _check_pca(res, pca_res) _check_ts(res, data, 3, 2) res = screen(img, time_axis=-1) assert_array_equal(data_mean, res['mean'].get_fdata()) _check_pca(res, pca_res) _check_ts(res, data, 3, 2) t0_img = rollimg(img, 't') t0_data = np.rollaxis(data, -1) res = screen(t0_img, time_axis='t') t0_pca_res = pca(t0_data, axis=0, standardize=False, ncomp=10) t0_pca_res = res2pos1(t0_pca_res) assert_array_equal(data_mean, res['mean'].get_fdata()) _check_pca(res, t0_pca_res) _check_ts(res, t0_data, 0, 3) res = screen(t0_img, time_axis=0) assert_array_equal(data_mean, res['mean'].get_fdata()) _check_pca(res, t0_pca_res) _check_ts(res, t0_data, 0, 3) # Check screens uses slice axis s0_img = rollimg(img, 2, 0) s0_data = np.rollaxis(data, 2, 0) res = screen(s0_img, slice_axis=0) _check_ts(res, s0_data, 3, 0) # And defaults to named slice axis # First re-show that when we don't specify, we get the default res = screen(img) _check_ts(res, data, 3, 2) pytest.raises(AssertionError, _check_ts, res, data, 3, 0) # Then specify, get non-default slicey_img = img.renamed_axes(slice='k', i='slice') res = screen(slicey_img) _check_ts(res, data, 3, 0) pytest.raises(AssertionError, _check_ts, res, data, 3, 2) def pca_pos(data4d): """ Flips signs equal over volume for PCA Needed because Windows appears to generate random signs for PCA components across PCA runs on the same data. 
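For example, a volume whose ``[0, 0, 0]`` time course is all
negative comes back with every sign flipped::

    arr = -np.ones((2, 2, 2, 3))
    assert (pca_pos(arr) == 1).all()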
""" signs = np.sign(data4d[0, 0, 0, :]) return data4d * signs def test_screen_slice_axis(): img = ni.load_image(funcfile) # Default screen raises a FutureWarning because of the default slice_axis exp_res = screen(img, slice_axis='k') with catch_warnings(): simplefilter('error') pytest.raises(FutureWarning, screen, img) pytest.raises(FutureWarning, screen, img, slice_axis=None) explicit_img = img.renamed_axes(k='slice') # Now the analysis works without warning res = screen(explicit_img) # And is the expected analysis # Very oddly on scipy 0.9 32 bit - at least - results differ between # runs, so we need assert_almost_equal assert_almost_equal(pca_pos(res['pca'].get_fdata()), pca_pos(exp_res['pca'].get_fdata())) assert_array_equal(res['ts_res']['slice_mean_diff2'], exp_res['ts_res']['slice_mean_diff2']) # Turn off warnings, also get expected analysis simplefilter('ignore') res = screen(img) assert_array_equal(res['ts_res']['slice_mean_diff2'], exp_res['ts_res']['slice_mean_diff2']) @needs_mpl_agg def test_write_screen_res(in_tmp_path): img = ni.load_image(funcfile) res = screen(img) os.mkdir('myresults') write_screen_res(res, 'myresults', 'myana') pca_img = ni.load_image(pjoin('myresults', 'pca_myana.nii')) assert pca_img.shape == img.shape[:-1] + (10,) # Make sure we get the same output image even from rolled image # Do fancy roll to put time axis first, and slice axis last. This does # a stress test on the axis ordering, but also makes sure that we are # getting the number of components from the right place. If we were # getting the number of components from the length of the last axis, # instead of the length of the 't' axis in the returned pca image, this # would be wrong (=21) which would also be more than the number of # basis vectors (19) so raise an error rimg = img.reordered_axes([3, 2, 0, 1]) os.mkdir('rmyresults') rres = screen(rimg) write_screen_res(rres, 'rmyresults', 'myana') rpca_img = ni.load_image(pjoin('rmyresults', 'pca_myana.nii')) assert rpca_img.shape == img.shape[:-1] + (10,) del pca_img, rpca_img nipy-0.6.1/nipy/algorithms/diagnostics/tests/test_time_difference.py000066400000000000000000000162521470056100100260170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing tsdiffana """ from os.path import dirname from os.path import join as pjoin import numpy as np import pytest import scipy.io as sio from numpy.testing import assert_array_almost_equal, assert_array_equal from nipy import load_image from nipy.testing import funcfile from ....core.api import rollimg from ....core.reference.coordinate_map import AxisError from .. 
import timediff as tsd TEST_DATA_PATH = pjoin(dirname(__file__), 'data') def test_time_slice_diffs(): n_tps = 10 n_slices = 4 slice_shape = (2,3) slice_size = np.prod(slice_shape) vol_shape = slice_shape + (n_slices,) vol_size = np.prod(vol_shape) ts = np.random.normal(size=vol_shape + (n_tps,)) * 100 + 10 expected = {} expected['volume_means'] = ts.reshape((vol_size, -1)).mean(0) # difference over time ^2 diffs2 = np.diff(ts, axis=-1)**2 expected['volume_mean_diff2'] = np.mean( diffs2.reshape((vol_size, -1)), 0) expected['slice_mean_diff2'] = np.zeros((n_tps-1, n_slices)) for s in range(n_slices): v = diffs2[:,:,s,:].reshape((slice_size, -1)) expected['slice_mean_diff2'][:,s] = np.mean(v, 0) expected['diff2_mean_vol'] = np.mean(diffs2, -1) max_diff_is = np.argmax(expected['slice_mean_diff2'], 0) sdmv = np.empty(vol_shape) for si, dti in enumerate(max_diff_is): sdmv[:,:,si] = diffs2[:,:,si,dti] expected['slice_diff2_max_vol'] = sdmv results = tsd.time_slice_diffs(ts) for key in expected: assert_array_almost_equal(results[key], expected[key]) # transposes, reset axes, get the same result results = tsd.time_slice_diffs(ts.T, 0, 1) results['diff2_mean_vol'] = results['diff2_mean_vol'].T results['slice_diff2_max_vol'] = results['slice_diff2_max_vol'].T for key in expected: assert_array_almost_equal(results[key], expected[key]) ts_t = ts.transpose((1, 3, 0, 2)) results = tsd.time_slice_diffs(ts_t, 1, -1) results['diff2_mean_vol'] = results['diff2_mean_vol'].transpose( (1,0,2)) results['slice_diff2_max_vol'] = results['slice_diff2_max_vol'].transpose( (1,0,2)) for key in expected: assert_array_almost_equal(results[key], expected[key]) def test_time_slice_axes(): # Test time and slice axes work as expected fimg = load_image(funcfile) # Put into array data = fimg.get_fdata() orig_results = tsd.time_slice_diffs(data) t0_data = np.rollaxis(data, 3) t0_results = tsd.time_slice_diffs(t0_data, 0) for key in ('volume_means', 'slice_mean_diff2'): assert_array_almost_equal(orig_results[key], t0_results[key]) s0_data = np.rollaxis(data, 2) s0_results = tsd.time_slice_diffs(s0_data, slice_axis=0) for key in ('volume_means', 'slice_mean_diff2'): assert_array_almost_equal(orig_results[key], s0_results[key]) # Incorrect slice axis bad_s0_results = tsd.time_slice_diffs(s0_data) assert (orig_results['slice_mean_diff2'].shape != bad_s0_results['slice_mean_diff2'].shape) # Slice axis equal to time axis - ValueError pytest.raises(ValueError, tsd.time_slice_diffs, data, -1, -1) pytest.raises(ValueError, tsd.time_slice_diffs, data, -1, 3) pytest.raises(ValueError, tsd.time_slice_diffs, data, 1, 1) pytest.raises(ValueError, tsd.time_slice_diffs, data, 1, -3) def test_against_matlab_results(): fimg = load_image(funcfile) results = tsd.time_slice_diffs(fimg.get_fdata()) # struct as record only to avoid deprecation warning tsd_results = sio.loadmat(pjoin(TEST_DATA_PATH, 'tsdiff_results.mat'), struct_as_record=True, squeeze_me=True) assert_array_almost_equal(results['volume_means'], tsd_results['g']) assert_array_almost_equal(results['volume_mean_diff2'], tsd_results['imgdiff']) assert_array_almost_equal(results['slice_mean_diff2'], tsd_results['slicediff']) # next tests are from saved, reloaded volumes at 16 bit integer # precision, so are not exact, but very close, given that the mean # of this array is around 3200 assert_array_almost_equal(results['diff2_mean_vol'], tsd_results['diff2_mean_vol'], decimal=1) assert_array_almost_equal(results['slice_diff2_max_vol'], tsd_results['slice_diff2_max_vol'], decimal=1) def 
assert_arr_img_res(arr_res, img_res): for key in ('volume_mean_diff2', 'slice_mean_diff2', 'volume_means'): assert_array_equal(arr_res[key], img_res[key]) for key in ('slice_diff2_max_vol', 'diff2_mean_vol'): assert_array_almost_equal(arr_res[key], img_res[key].get_fdata()) def test_tsd_image(): # Test image version of time slice diff fimg = load_image(funcfile) data = fimg.get_fdata() tsda = tsd.time_slice_diffs tsdi = tsd.time_slice_diffs_image arr_results = tsda(data) # image routine insists on named slice axis, no default pytest.raises(AxisError, tsdi, fimg) # Works when specifying slice axis as keyword argument img_results = tsdi(fimg, slice_axis='k') assert_arr_img_res(arr_results, img_results) ax_names = fimg.coordmap.function_domain.coord_names # Test against array version for time_ax in range(4): time_name = ax_names[time_ax] for slice_ax in range(4): slice_name = ax_names[slice_ax] if time_ax == slice_ax: pytest.raises(ValueError, tsda, data, time_ax, slice_ax) pytest.raises(ValueError, tsdi, fimg, time_ax, slice_ax) pytest.raises(ValueError, tsdi, fimg, time_name, slice_ax) pytest.raises(ValueError, tsdi, fimg, time_ax, slice_name) pytest.raises(ValueError, tsdi, fimg, time_name, slice_name) continue arr_res = tsda(data, time_ax, slice_ax) assert_arr_img_res(arr_res, tsdi(fimg, time_ax, slice_ax)) assert_arr_img_res(arr_res, tsdi(fimg, time_name, slice_ax)) assert_arr_img_res(arr_res, tsdi(fimg, time_ax, slice_name)) img_results = tsdi(fimg, time_name, slice_name) assert_arr_img_res(arr_res, img_results) exp_ax_names = tuple(n for n in ax_names if n != time_name) for key in ('slice_diff2_max_vol', 'diff2_mean_vol'): img = img_results[key] assert (img.coordmap.function_domain.coord_names == exp_ax_names) # Test defaults on rolled image fimg_rolled = rollimg(fimg, 't') # Still don't have a slice axis specified pytest.raises(AxisError, tsdi, fimg_rolled) # Test default time axis assert_arr_img_res(arr_results, tsdi(fimg_rolled, slice_axis='k')) # Test axis named slice overrides default guess time_ax = -1 for sa_no, sa_name in ((0, 'i'), (1, 'j'), (2, 'k')): fimg_renamed = fimg.renamed_axes(**{sa_name: 'slice'}) arr_res = tsda(data, time_ax, sa_no) assert_arr_img_res(arr_res, tsdi(fimg_renamed, time_ax)) nipy-0.6.1/nipy/algorithms/diagnostics/timediff.py000066400000000000000000000201111470056100100222620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Time series diagnostics These started life as ``tsdiffana.m`` - see http://imaging.mrc-cbu.cam.ac.uk/imaging/DataDiagnostics Oliver Josephs (FIL) gave me (MB) the idea of time-point to time-point subtraction as a diagnostic for motion and other sudden image changes. ''' import numpy as np from ...core.reference.coordinate_map import AxisError, drop_io_dim, io_axis_indices from ...io.api import as_image def time_slice_diffs(arr, time_axis=-1, slice_axis=None): ''' Time-point to time-point differences over volumes and slices We think of the passed array as an image. The image has a "time" dimension given by `time_axis` and a "slice" dimension, given by `slice_axis`, and one or more other dimensions. In the case of imaging there will usually be two more dimensions (the dimensions defining the size of an image slice). A single slice in the time dimension we call a "volume". A single entry in `arr` is a "voxel". For example, if `time_axis` == 0, then ``v = arr[0]`` would be the first volume in the series. 
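(With the default `time_axis` of -1, the equivalent expression is ``v = arr[..., 0]``.)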
The volume ``v`` above has ``v.size`` voxels. If, in addition, `slice_axis` == 1, then for the volume ``v`` (above) ``s = v[0]`` would be a "slice", with ``s.size`` voxels. These are obviously terms from neuroimaging. Parameters ---------- arr : array_like Array over which to calculate time and slice differences. We'll call this array an 'image' in this doc. time_axis : int, optional axis of `arr` that varies over time. Default is last slice_axis : None or int, optional axis of `arr` that varies over image slice. None gives last non-time axis. Returns ------- results : dict ``T`` is the number of time points (``arr.shape[time_axis]``) ``S`` is the number of slices (``arr.shape[slice_axis]``) ``v`` is the shape of a volume (``rollimg(arr, time_axis)[0].shape``) ``d2[t]`` is the volume of squared differences between voxels at time point ``t`` and time point ``t+1`` `results` has keys: * 'volume_mean_diff2' : (T-1,) array array containing the mean (over voxels in volume) of the squared difference from one time point to the next * 'slice_mean_diff2' : (T-1, S) array giving the mean (over voxels in slice) of the difference from one time point to the next, one value per slice, per timepoint * 'volume_means' : (T,) array mean over voxels for each volume ``vol[t] for t in 0:T`` * 'slice_diff2_max_vol' : v[:] array volume, of same shape as input time point volumes, where each slice is is the slice from ``d2[t]`` for t in 0:T-1, that has the largest variance across ``t``. Thus each slice in the volume may well result from a different difference time point. * 'diff2_mean_vol`` : v[:] array volume with the mean of ``d2[t]`` across t for t in 0:T-1. Raises ------ ValueError : if `time_axis` refers to same axis as `slice_axis` ''' arr = np.asarray(arr) ndim = arr.ndim # roll time axis to 0, slice axis to 1 for convenience if time_axis < 0: time_axis += ndim if slice_axis is None: slice_axis = ndim-2 if time_axis == ndim-1 else ndim-1 elif slice_axis < 0: slice_axis += ndim if time_axis == slice_axis: raise ValueError('Time axis refers to same axis as slice axis') arr = np.rollaxis(arr, time_axis) # we may have changed the position of slice_axis if time_axis > slice_axis: slice_axis += 1 arr = np.rollaxis(arr, slice_axis, 1) # shapes of things shape = arr.shape T = shape[0] S = shape[1] vol_shape = shape[1:] # loop over time points to save memory volds = np.empty((T-1,)) sliceds = np.empty((T-1,S)) means = np.empty((T,)) diff_mean_vol = np.zeros(vol_shape) slice_diff_max_vol = np.zeros(vol_shape) slice_diff_maxes = np.zeros(S) last_tp = arr[0] means[0] = last_tp.mean() for dtpi in range(T-1): tp = arr[dtpi+1] # shape vol_shape means[dtpi+1] = tp.mean() dtp_diff2 = (tp - last_tp)**2 diff_mean_vol += dtp_diff2 sliceds[dtpi] = dtp_diff2.reshape(S, -1).mean(-1) # check whether we have found a highest-diff slice sdmx_higher = sliceds[dtpi] > slice_diff_maxes if any(sdmx_higher): slice_diff_maxes[sdmx_higher] = sliceds[dtpi][sdmx_higher] slice_diff_max_vol[sdmx_higher] = dtp_diff2[sdmx_higher] last_tp = tp volds = sliceds.mean(1) diff_mean_vol /= (T-1) # roll vol shapes back to match input diff_mean_vol = np.rollaxis(diff_mean_vol, 0, slice_axis) slice_diff_max_vol = np.rollaxis(slice_diff_max_vol, 0, slice_axis) return {'volume_mean_diff2': volds, 'slice_mean_diff2': sliceds, 'volume_means': means, 'diff2_mean_vol': diff_mean_vol, 'slice_diff2_max_vol': slice_diff_max_vol} def time_slice_diffs_image(img, time_axis='t', slice_axis='slice'): """ Time-point to time-point differences over volumes and slices of 
image Parameters ---------- img : Image The image on which to perform time-point differences time_axis : str or int, optional Axis indexing time-points. Default is 't'. If `time_axis` is an integer, gives the index of the input (domain) axis of `img`. If `time_axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. slice_axis : str or int, optional Axis indexing MRI slices. If `slice_axis` is an integer, gives the index of the input (domain) axis of `img`. If `slice_axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. Returns ------- results : dict `arr` refers to the array as loaded from `img` ``T`` is the number of time points (``img.shape[time_axis]``) ``S`` is the number of slices (``img.shape[slice_axis]``) ``v`` is the shape of a volume (``rollimg(img, time_axis)[0].shape``) ``d2[t]`` is the volume of squared differences between voxels at time point ``t`` and time point ``t+1`` `results` has keys: * 'volume_mean_diff2' : (T-1,) array array containing the mean (over voxels in volume) of the squared difference from one time point to the next * 'slice_mean_diff2' : (T-1, S) array giving the mean (over voxels in slice) of the difference from one time point to the next, one value per slice, per timepoint * 'volume_means' : (T,) array mean over voxels for each volume ``vol[t] for t in 0:T`` * 'slice_diff2_max_vol' : v[:] image image volume, of same shape as input time point volumes, where each slice is is the slice from ``d2[t]`` for t in 0:T-1, that has the largest variance across ``t``. Thus each slice in the volume may well result from a different difference time point. * 'diff2_mean_vol`` : v[:] image image volume with the mean of ``d2[t]`` across t for t in 0:T-1. 
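    Examples
    --------
    A minimal sketch, assuming ``fimg`` is a 4D functional image whose
    coordmap has a 'k' input axis for slices (the variable name and axis
    letter here are hypothetical)::

        results = time_slice_diffs_image(fimg, slice_axis='k')
        results['volume_mean_diff2']   # (T - 1,) array
        results['diff2_mean_vol']      # Image holding one volume of data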
""" img = as_image(img) img_class = img.__class__ time_in_ax, time_out_ax = io_axis_indices(img.coordmap, time_axis) if None in (time_in_ax, time_out_ax): raise AxisError(f'Cannot identify matching input output axes with "{time_axis}"') slice_in_ax, slice_out_ax = io_axis_indices(img.coordmap, slice_axis) if None in (slice_in_ax, slice_out_ax): raise AxisError(f'Cannot identify matching input output axes with "{slice_axis}"') vol_coordmap = drop_io_dim(img.coordmap, time_axis) results = time_slice_diffs(img.get_fdata(), time_in_ax, slice_in_ax) for key in ('slice_diff2_max_vol', 'diff2_mean_vol'): vol = img_class(results[key], vol_coordmap) results[key] = vol return results nipy-0.6.1/nipy/algorithms/diagnostics/tsdiffplot.py000066400000000000000000000067421470056100100226670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' plot tsdiffana parameters ''' import numpy as np import nipy from ...utils import deprecate_with_doc from .timediff import time_slice_diffs def plot_tsdiffs(results, axes=None): ''' Plotting routine for time series difference metrics Requires matplotlib Parameters ---------- results : dict Results of format returned from :func:`nipy.algorithms.diagnostics.time_slice_diff` ''' import matplotlib.pyplot as plt T = len(results['volume_means']) S = results['slice_mean_diff2'].shape[1] mean_means = np.mean(results['volume_means']) scaled_slice_diff = results['slice_mean_diff2'] / mean_means if axes is None: n_plots = 4 fig = plt.figure() fig.set_size_inches([10,10]) axes = [plt.subplot(n_plots, 1, i+1) for i in range(n_plots)] def xmax_labels(ax, val, xlabel, ylabel): xlims = ax.axis() ax.axis((0, val) + xlims[2:]) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) # plot of mean volume variance ax = axes[0] ax.plot(results['volume_mean_diff2'] / mean_means) xmax_labels(ax, T-1, 'Difference image number', 'Scaled variance') # plot of diff by slice ax = axes[1] #Set up the color map for the different slices: X, Y = np.meshgrid(np.arange(scaled_slice_diff.shape[0]), np.arange(scaled_slice_diff.shape[1])) # Use HSV in order to code the slices from bottom to top: ax.scatter(X.T.ravel(),scaled_slice_diff.ravel(), c=Y.T.ravel(),cmap=plt.cm.hsv, alpha=0.2) xmax_labels(ax, T-1, 'Difference image number', 'Slice by slice variance') # mean intensity ax = axes[2] ax.plot(results['volume_means'] / mean_means) xmax_labels(ax, T, 'Image number', 'Scaled mean \n voxel intensity') # slice plots min max mean ax = axes[3] ax.plot(np.mean(scaled_slice_diff, 0), 'k') ax.plot(np.min(scaled_slice_diff, 0), 'b') ax.plot(np.max(scaled_slice_diff, 0), 'r') xmax_labels(ax, S+1, 'Slice number', 'Max/mean/min \n slice variation') return axes @deprecate_with_doc('please see docstring for alternative code') def plot_tsdiffs_image(img, axes=None, show=True): ''' Plot time series diagnostics for image This function is deprecated; please use something like:: results = time_slice_diff_image(img, slice_axis=2) plot_tsdiffs(results) instead. Parameters ---------- img : image-like or filename str image on which to do diagnostics axes : None or sequence, optional Axes on which to plot the diagnostics. If None, then we create a figure and subplots for the plots. Sequence should have length >=4. show : {True, False}, optional If True, show the figure after plotting it Returns ------- axes : Matplotlib axes Axes on which we have done the plots. 
Will be same as `axes` input if `axes` input was not None ''' if isinstance(img, str): title = img else: title = 'Difference plots' img = nipy.as_image(img) res = time_slice_diffs(img) axes = plot_tsdiffs(res, axes) axes[0].set_title(title) if show: # show the plot import matplotlib.pyplot as plt plt.show() return axes nipy-0.6.1/nipy/algorithms/fwhm.py000066400000000000000000000137241470056100100171410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides classes and definitions for using full width at half maximum (FWHM) to be used in conjunction with Gaussian Random Field Theory to determine resolution elements (resels). A resolution element (resel) is defined as a block of pixels of the same size as the FWHM of the smoothed image. There are two methods implemented to estimate (3d, or volumewise) FWHM based on a 4d Image: fastFHWM: used if the entire 4d Image is available iterFWHM: used when 4d Image is being filled in by slices of residuals """ __docformat__ = 'restructuredtext' import numpy as np from numpy.linalg import det from nipy.core.api import Image from .utils.matrices import pos_recipr class Resels: """The Resels class. """ def __init__(self, coordmap, normalized=False, fwhm=None, resels=None, mask=None, clobber=False, D=3): """ Initialize resels class Parameters ---------- coordmap : ``CoordinateMap`` CoordinateMap over which fwhm and resels are to be estimated. Used in fwhm/resel conversion. fwhm : ``Image`` Optional Image of FWHM. Used to convert FWHM Image to resels if FWHM is not being estimated. resels : ``Image`` Optional Image of resels. Used to compute resels within a mask, for instance, if FWHM has already been estimated. mask : ``Image`` Mask over which to integrate resels. clobber : ``bool`` Clobber output FWHM and resel images? D : ``int`` Can be 2 or 3, the dimension of the final volume. """ self.fwhm = fwhm self.resels = resels self.mask = mask self.clobber = clobber self.coordmap = coordmap self.D = D self.normalized = normalized _transform = self.coordmap.affine self.wedge = np.power(np.fabs(det(_transform)), 1./self.D) def integrate(self, mask=None): """ Integrate resels within `mask` (or use self.mask) Parameters ---------- mask : ``Image`` Optional mask over which to integrate (add) resels. Returns ------- total_resels : the resels contained in the mask FWHM : float an estimate of FWHM based on the average resel per voxel nvoxel: int the number of voxels in the mask """ _resels = self.resels[:] if mask is not None: _mask = mask else: _mask = self.mask if _mask is not None: _mask = _mask[:].astype(np.int32) nvoxel = _mask.sum() else: _mask = 1. nvoxel = _resels.size _resels = (_resels * _mask).sum() _fwhm = self.resel2fwhm(_resels / nvoxel) return _resels, _fwhm, nvoxel def resel2fwhm(self, resels): """ Convert resels as `resels` to isotropic FWHM Parameters ---------- resels : float Convert a resel value to an equivalent isotropic FWHM based on step sizes in self.coordmap. Returns ------- fwhm : float """ return np.sqrt(4*np.log(2.)) * self.wedge * pos_recipr(np.power(resels, 1./self.D)) def fwhm2resel(self, fwhm): """ Convert FWHM `fwhm` to equivalent reseels per voxel Parameters ---------- fwhm : float Convert an FWHM value to an equivalent resels per voxel based on step sizes in self.coordmap. 
Returns ------- resels : float """ return pos_recipr(np.power(fwhm / np.sqrt(4*np.log(2)) * self.wedge, self.D)) def __iter__(self): """ Return iterator Returns ------- itor : iterator self """ if not self.fwhm: im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap) else: im = \ Image(self.fwhm, clobber=self.clobber, mode='w', coordmap=self.coordmap) self.fwhm = im if not self.resels: im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap) else: im = \ Image(self.resels, clobber=self.clobber, mode='w', coordmap=self.coordmap) self.resels = im return self class ReselImage(Resels): def __init__(self, resels=None, fwhm=None, **keywords): """ Initialize resel image Parameters ---------- resels : `core.api.Image` Image of resel per voxel values. fwhm : `core.api.Image` Image of FWHM values. keywords : ``dict`` Passed as keywords arguments to `core.api.Image` """ if not resels and not fwhm: raise ValueError('need either a resels image or an FWHM image') if fwhm is not None: fwhm = Image(fwhm, **keywords) Resels.__init__(self, fwhm, resels=resels, fwhm=fwhm) if resels is not None: resels = Image(resels, **keywords) Resels.__init__(self, resels, resels=resels, fwhm=fwhm) if not self.fwhm: self.fwhm = Image(self.resel2fwhm(self.resels[:]), coordmap=self.coordmap, **keywords) if not self.resels: self.resels = Image(self.fwhm2resel(self.fwhm[:]), coordmap=self.coordmap, **keywords) def __iter__(self): """ Return iterator Returns ------- itor : iterator ``self`` """ return self def _calc_detlam(xx, yy, zz, yx, zx, zy): """ Calculate determinant of symmetric 3x3 matrix [[xx,yx,xz], [yx,yy,zy], [zx,zy,zz]] """ return zz * (yy*xx - yx**2) - \ zy * (zy*xx - zx*yx) + \ zx * (zy*yx - zx*yy) nipy-0.6.1/nipy/algorithms/graph/000077500000000000000000000000001470056100100167205ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/graph/__init__.py000066400000000000000000000005411470056100100210310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .graph import ( Graph, WeightedGraph, complete_graph, concatenate_graphs, eps_nn, graph_3d_grid, knn, lil_cc, mst, wgraph_from_3d_grid, wgraph_from_adjacency, wgraph_from_coo_matrix, ) nipy-0.6.1/nipy/algorithms/graph/_graph.pyx000066400000000000000000000014701470056100100207240ustar00rootroot00000000000000cimport numpy as cnp cimport cython ctypedef cnp.float64_t DOUBLE ctypedef cnp.intp_t INT @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def dilation(cnp.ndarray[DOUBLE, ndim=2] field,\ cnp.ndarray[INT, ndim=1] idx,\ cnp.ndarray[INT, ndim=1] neighb): cdef int size_max = field.shape[0] cdef int dim = field.shape[1] cdef int i, j, d cdef DOUBLE fmax cdef cnp.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] for d in range(dim): for i in range(size_max): fmax = field[i, d] for j in range(idx[i], idx[i + 1]): if field[neighb[j], d] > fmax: fmax = field[neighb[j], d] res[i] = fmax for i in range(size_max): field[i, d] = res[i] return res nipy-0.6.1/nipy/algorithms/graph/bipartite_graph.py000066400000000000000000000222051470056100100224370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements the BipartiteGraph class, used to represent weighted bipartite graph: it contains two types of vertices, say 'left' and 'right'; then edges can only exist between 'left' and 'right' vertices. 
For simplicity the vertices of either side are labeled [1..V] and [1..W] respectively. Author: Bertrand Thirion, 2006--2011 """ import numpy as np def check_feature_matrices(X, Y): """ checks whether the dimensions of X and Y are consistent Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features """ if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) if np.size(Y) == Y.shape[0]: Y = np.reshape(Y, (np.size(Y), 1)) if X.shape[1] != Y.shape[1]: raise ValueError('X.shape[1] should = Y.shape[1]') def bipartite_graph_from_coo_matrix(x): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: scipy.sparse.coo_matrix instance, the input matrix Returns ------- bg: BipartiteGraph instance """ i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data wg = BipartiteGraph(x.shape[0], x.shape[1], edges, weights) return wg def bipartite_graph_from_adjacency(x): """Instantiates a weighted graph from a square 2D array Parameters ---------- x: 2D array instance, the input array Returns ------- wg: BipartiteGraph instance """ from scipy.sparse import coo_matrix return bipartite_graph_from_coo_matrix(coo_matrix(x)) def cross_eps(X, Y, eps=1.): """Return the eps-neighbours graph of from X to Y Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features eps=1, float: the neighbourhood size considered Returns ------- the resulting bipartite graph instance Notes ----- for the sake of speed it is advisable to give PCA-preprocessed matrices X and Y. """ from scipy.sparse import coo_matrix check_feature_matrices(X, Y) try: eps = float(eps) except: "eps cannot be cast to a float" if np.isnan(eps): raise ValueError('eps is nan') if np.isinf(eps): raise ValueError('eps is inf') ij = np.zeros((0, 2)) data = np.zeros(0) for i, x in enumerate(X): dist = np.sum((Y - x) ** 2, 1) idx = np.asanyarray(np.where(dist < eps)) data = np.hstack((data, dist[idx.ravel()])) ij = np.vstack((ij, np.hstack(( i * np.ones((idx.size, 1)), idx.T)))).astype(np.int_) data = np.maximum(data, 1.e-15) adj = coo_matrix((data, ij.T), shape=(X.shape[0], Y.shape[0])) return bipartite_graph_from_coo_matrix(adj) def cross_knn(X, Y, k=1): """return the k-nearest-neighbours graph of from X to Y Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features eps=1, float: the neighbourhood size considered Returns ------- BipartiteGraph instance Notes ----- For the sake of speed it is advised to give PCA-transformed matrices X and Y. 
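    Examples
    --------
    A minimal sketch on random feature matrices (the data here are
    hypothetical)::

        import numpy as np
        X = np.random.randn(10, 3)
        Y = np.random.randn(20, 3)
        bg = cross_knn(X, Y, k=2)
        bg.E    # 10 left vertices * 2 neighbours = 20 edges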
""" from scipy.sparse import coo_matrix check_feature_matrices(X, Y) try: k = int(k) except: "k cannot be cast to an int" if np.isnan(k): raise ValueError('k is nan') if np.isinf(k): raise ValueError('k is inf') k = min(k, Y.shape[0] -1) ij = np.zeros((0, 2)) data = np.zeros(0) for i, x in enumerate(X): dist = np.sum((Y - x) ** 2, 1) idx = np.argsort(dist)[:k] data = np.hstack((data, dist[idx])) ij = np.vstack((ij, np.hstack(( i * np.ones((k, 1)), np.reshape(idx, (k, 1)))))) data = np.maximum(data, 1.e-15) adj = coo_matrix((data, ij.T), shape=(X.shape[0], Y.shape[0])) return bipartite_graph_from_coo_matrix(adj) class BipartiteGraph: """ Bipartite graph class A graph for which there are two types of nodes, such that edges can exist only between nodes of type 1 and type 2 (not within) fields of this class: V (int, > 0) the number of type 1 vertices W (int, > 0) the number of type 2 vertices E: (int) the number of edges edges: array of shape (self.E, 2) reprensenting pairwise neighbors weights, array of shape (self.E), +1/-1 for scending/descending links """ def __init__(self, V, W, edges=None, weights=None): """ Constructor Parameters ---------- V (int), the number of vertices of subset 1 W (int), the number of vertices of subset 2 edges=None: array of shape (self.E, 2) the edge array of the graph weights=None: array of shape (self.E) the associated weights array """ V = int(V) W = int(W) if (V < 1) or (W < 1): raise ValueError('cannot create graph with no vertex') self.V = V self.W = W self.E = 0 if (edges is None) & (weights is None): self.edges = np.array([], np.int_) self.weights = np.array([]) else: if edges.shape[0] == np.size(weights): E = edges.shape[0] self.E = E self.edges = - np.ones((E, 2), np.int_) self.set_edges(edges) self.set_weights(weights) else: raise ValueError('Incompatible size of the edges and \ weights matrices') def set_weights(self, weights): """ Set weights `weights` to edges Parameters ---------- weights, array of shape(self.V): edges weights """ if np.size(weights) != self.E: raise ValueError('The weight size is not the edges size') else: self.weights = np.reshape(weights, (self.E)) def set_edges(self, edges): """ Set edges to graph sets self.edges=edges if 1. edges has a correct size 2. edges take values in [0..V-1]*[0..W-1] Parameters ---------- edges: array of shape(self.E, 2): set of candidate edges """ if np.shape(edges) != np.shape(self.edges): raise ValueError('Incompatible size of the edge matrix') if np.size(edges) > 0: if edges.max(0)[0] + 1 > self.V: raise ValueError('Incorrect edge specification') if edges.max(0)[1] + 1 > self.W: raise ValueError('Incorrect edge specification') self.edges = edges def copy(self): """ returns a copy of self """ G = BipartiteGraph(self.V, self.W, self.edges.copy(), self.weights.copy()) return G def subgraph_left(self, valid, renumb=True): """Extraction of a subgraph Parameters ---------- valid, boolean array of shape self.V renumb, boolean: renumbering of the (left) edges Returns ------- G : None or ``BipartiteGraph`` instance A new BipartiteGraph instance with only the left vertices that are True. 
If sum(valid)==0, None is returned """ if np.size(valid) != self.V: raise ValueError('valid does not have the correct size') if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = valid[self.edges[:, 0]] edges = self.edges[win_edges] weights = self.weights[win_edges] if renumb: rindex = np.hstack((0, np.cumsum(valid > 0))) edges[:, 0] = rindex[edges[:, 0]] G = BipartiteGraph(np.sum(valid), self.W, edges, weights) else: G = BipartiteGraph(self.V, self.W, edges, weights) else: G = self.copy() return G def subgraph_right(self, valid, renumb=True): """ Extraction of a subgraph Parameters ---------- valid : bool array of shape self.V renumb : bool, optional renumbering of the (right) edges Returns ------- G : None or ``BipartiteGraph`` instance. A new BipartiteGraph instance with only the right vertices that are True. If sum(valid)==0, None is returned """ if np.size(valid) != self.V: raise ValueError('valid does not have the correct size') if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = valid[self.edges[:, 1]] edges = self.edges[win_edges] weights = self.weights[win_edges] if renumb: rindex = np.hstack((0, np.cumsum(valid > 0))) edges[:, 1] = rindex[edges[:, 1]] G = BipartiteGraph(self.V, np.sum(valid), edges, weights) else: G = BipartiteGraph(self.V, self.W, edges, weights) else: G = self.copy() return G nipy-0.6.1/nipy/algorithms/graph/field.py000066400000000000000000000462601470056100100203650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements the Field class, which simply a WeightedGraph (see the graph.py) module, plus an array that yields (possibly multi-dimnesional) features associated with graph vertices. This allows some kinds of computations (all those relating to mathematical morphology, diffusion etc.) Certain functions are provided to Instantiate Fields easily, given a WeightedGraph and feature data. 
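A minimal usage sketch (the grid shape and random data below are
hypothetical)::

    import numpy as np
    from nipy.algorithms.graph import wgraph_from_3d_grid
    from nipy.algorithms.graph.field import field_from_graph_and_data

    # vertices of a small 3D grid, with 6-connectivity
    xyz = np.array(np.where(np.ones((2, 3, 4)))).T.astype(np.intp)
    g = wgraph_from_3d_grid(xyz, k=6)
    f = field_from_graph_and_data(g, np.random.randn(g.V, 1))
    f.dilation(1)   # in-place morphological dilation of the field data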
Author: Bertrand Thirion, 2006--2011
"""

from warnings import warn

import numpy as np

from .graph import Graph, WeightedGraph

NEGINF = -np.inf


def field_from_coo_matrix_and_data(x, data):
    """ Instantiates a Field (a weighted graph plus feature data) from a
    (sparse) coo_matrix

    Parameters
    ----------
    x: (V, V) scipy.sparse.coo_matrix instance,
       the input matrix
    data: array of shape (V, dim),
          the field data

    Returns
    -------
    ifield: resulting Field instance
    """
    if x.shape[0] != x.shape[1]:
        raise ValueError("the input coo_matrix is not square")
    if data.shape[0] != x.shape[0]:
        raise ValueError("data and x do not have consistent shapes")
    i, j = x.nonzero()
    edges = np.vstack((i, j)).T
    weights = x.data
    ifield = Field(x.shape[0], edges, weights, data)
    return ifield


def field_from_graph_and_data(g, data):
    """ Instantiates a Field from a WeightedGraph plus some feature data

    Parameters
    ----------
    g: WeightedGraph instance,
       the input graph
    data: array of shape (g.V, dim),
          the field data

    Returns
    -------
    ifield: resulting Field instance
    """
    if data.shape[0] != g.V:
        raise ValueError("data and g do not have consistent shapes")
    ifield = Field(g.V, g.edges, g.weights, data)
    return ifield


class Field(WeightedGraph):
    """ This is the basic field structure, which contains the weighted
    graph structure plus an array of data (the 'field')

    field is an array of size (n, p) where n is the number of vertices
    of the graph and p is the field dimension
    """

    def __init__(self, V, edges=None, weights=None, field=None):
        """
        Parameters
        ----------
        V (int > 0) the number of vertices of the graph
        edges=None: the edge array of the graph
        weights=None: the associated weights array
        field=None: the field data itself
        """
        V = int(V)
        if V < 1:
            raise ValueError('cannot create graph with no vertex')
        self.V = int(V)
        self.E = 0
        self.edges = []
        self.weights = []
        if (edges is not None) or (weights is not None):
            if len(edges) == 0:
                E = 0
            elif edges.shape[0] == np.size(weights):
                E = edges.shape[0]
            else:
                raise ValueError('Incompatible size of the edges \
                and weights matrices')
            self.V = V
            self.E = E
            self.edges = edges
            self.weights = weights
        self.field = []
        if field is None:
            pass
        else:
            if np.size(field) == self.V:
                field = np.reshape(field, (self.V, 1))
            if field.shape[0] != self.V:
                raise ValueError('field does not have a correct size')
            else:
                self.field = field

    def get_field(self):
        return self.field

    def set_field(self, field):
        if np.size(field) == self.V:
            field = np.reshape(field, (self.V, 1))
        if field.shape[0] != self.V:
            raise ValueError('field does not have a correct size')
        else:
            self.field = field

    def closing(self, nbiter=1):
        """Morphological closing of the field data.

        self.field is changed inplace

        Parameters
        ----------
        nbiter=1 : the number of iterations required
        """
        nbiter = int(nbiter)
        self.dilation(nbiter)
        self.erosion(nbiter)

    def opening(self, nbiter=1):
        """Morphological opening of the field data.
self.field is changed inplace Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) self.erosion(nbiter) self.dilation(nbiter) def dilation(self, nbiter=1, fast=True): """Morphological dilation of the field data, changed in place Parameters ---------- nbiter: int, optional, the number of iterations required Notes ----- When data dtype is not float64, a slow version of the code is used """ nbiter = int(nbiter) if self.field.dtype != np.float64: warn('data type is not float64; a slower version is used') fast = False if fast: from ._graph import dilation if self.E > 0: if (self.field.size == self.V): self.field = self.field.reshape((self.V, 1)) idx, neighb, _ = self.compact_neighb() for i in range(nbiter): dilation(self.field, idx, neighb) else: from scipy.sparse import dia_matrix adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows for i in range(nbiter): self.field = np.array([self.field[row].max(0) for row in rows]) def highest_neighbor(self, refdim=0): """Computes the neighbor with highest field value along refdim Parameters ---------- refdim: int, optional, the dimension of the field under consideration Returns ------- hneighb: array of shape(self.V), index of the neighbor with highest value """ from scipy.sparse import dia_matrix refdim = int(refdim) # add self-edges to avoid singularities, when taking the maximum adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows hneighb = np.array([row[self.field[row].argmax()] for row in rows]) return hneighb def erosion(self, nbiter=1): """Morphological opening of the field Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) lil = self.to_coo_matrix().tolil().rows.tolist() for i in range(nbiter): nf = np.zeros_like(self.field) for k, neighbors in enumerate(lil): nf[k] = self.field[neighbors].min(0) self.field = nf def get_local_maxima(self, refdim=0, th=NEGINF): """ Look for the local maxima of one dimension (refdim) of self.field Parameters ---------- refdim (int) the field dimension over which the maxima are looked after th = float, optional threshold so that only values above th are considered Returns ------- idx: array of shape (nmax) indices of the vertices that are local maxima depth: array of shape (nmax) topological depth of the local maxima : depth[idx[i]] = q means that idx[i] is a q-order maximum """ depth_all = self.local_maxima(refdim, th) idx = np.ravel(np.where(depth_all)) depth = depth_all[idx] return idx, depth def local_maxima(self, refdim=0, th=NEGINF): """Returns all the local maxima of a field Parameters ---------- refdim (int) field dimension over which the maxima are looked after th: float, optional threshold so that only values above th are considered Returns ------- depth: array of shape (nmax) a labelling of the vertices such that depth[v] = 0 if v is not a local maximum depth[v] = 1 if v is a first order maximum ... 
depth[v] = q if v is a q-order maximum """ refdim = int(refdim) if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError(refdim > self.shape[1]) depth = np.zeros(self.V, np.int_) # create a subfield(thresholding) sf = self.subfield(self.field.T[refdim] >= th) initial_field = sf.field.T[refdim] sf.field = initial_field.astype(np.float64) # compute the depth in the subgraph ldepth = sf.V * np.ones(sf.V, np.int_) for k in range(sf.V): dilated_field_old = sf.field.ravel().copy() sf.dilation(1) non_max = sf.field.ravel() > dilated_field_old ldepth[non_max] = np.minimum(k, ldepth[non_max]) if (non_max == False).all(): ldepth[sf.field.ravel() == initial_field] = np.maximum(k, 1) break # write all the depth values depth[self.field[:, refdim] >= th] = ldepth return depth def diffusion(self, nbiter=1): """diffusion of the field data in the weighted graph structure self.field is changed inplace Parameters ---------- nbiter: int, optional the number of iterations required Notes ----- The process is run for all the dimensions of the field """ nbiter = int(nbiter) adj = self.to_coo_matrix() for i in range(nbiter): self.field = adj * self.field def custom_watershed(self, refdim=0, th=NEGINF): """ customized watershed analysis of the field. Note that bassins are found around each maximum (and not minimum as conventionally) Parameters ---------- refdim: int, optional th: float optional, threshold of the field Returns ------- idx: array of shape (nbassins) indices of the vertices that are local maxima label : array of shape (self.V) labelling of the vertices according to their bassin """ from numpy import ma if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int_) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) # compute the basins hneighb = sf.highest_neighbor() edges = np.vstack((hneighb, np.arange(sf.V))).T edges = np.vstack((edges, np.vstack((np.arange(sf.V), hneighb)).T)) aux = Graph(sf.V, edges.shape[0], edges) llabel = aux.cc() n_bassins = len(np.unique(llabel)) # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(n_bassins)]) return idx, label def threshold_bifurcations(self, refdim=0, th=NEGINF): """Analysis of the level sets of the field: Bifurcations are defined as changes in the topology in the level sets when the level (threshold) is varied This can been thought of as a kind of Morse analysis Parameters ---------- th: float, optional, threshold so that only values above th are considered Returns ------- idx: array of shape (nlsets) indices of the vertices that are local maxima height: array of shape (nlsets) the depth of the local maxima depth[idx[i]] = q means that idx[i] is a q-order maximum Note that this is also the diameter of the basins associated with local maxima parents: array of shape (nlsets) the label of the maximum which dominates each local maximum i.e. 
it describes the hierarchy of the local maxima label: array of shape (self.V) a labelling of thevertices according to their bassin """ from numpy import ma if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int_) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) initial_field = sf.field[:, refdim].copy() sf.field = initial_field.copy() # explore the subfield order = np.argsort(- initial_field) rows = sf.to_coo_matrix().tolil().rows llabel = - np.ones(sf.V, np.int_) parent, root = np.arange(2 * self.V), np.arange(2 * self.V) # q will denote the region index q = 0 for i in order: if (llabel[rows[i]] > - 1).any(): nlabel = np.unique(llabel[rows[i]]) if nlabel[0] == -1: nlabel = nlabel[1:] nlabel = np.unique(root[nlabel]) if len(nlabel) == 1: # we are at a regular point llabel[i] = nlabel[0] else: # we are at a saddle point llabel[i] = q parent[nlabel] = q root[nlabel] = q for j in nlabel: root[root == j] = q q += 1 else: # this is a new component llabel[i] = q q += 1 parent = parent[:q] # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(q)]) return idx, parent, label def constrained_voronoi(self, seed): """Voronoi parcellation of the field starting from the input seed Parameters ---------- seed: int array of shape(p), the input seeds Returns ------- label: The resulting labelling of the data Notes ----- FIXME: deal with graphs with several ccs """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') seed = seed.astype(np.int_) weights = np.sqrt(np.sum((self.field[self.edges.T[0]] - self.field[self.edges.T[1]]) ** 2, 1)) g = WeightedGraph(self.V, self.edges, weights) label = g.voronoi_labelling(seed) return label def geodesic_kmeans(self, seeds=None, label=None, maxiter=100, eps=1.e-4, verbose=0): """ Geodesic k-means algorithm i.e. 
obtention of clusters that are topologically connected and minimally variable concerning the information of self.field Parameters ---------- seeds: array of shape(p), optional, initial indices of the seeds within the field if seeds==None the labels are used as initialization labels: array of shape(self.V) initial labels, optional, it is expected that labels take their values in a certain range (0..lmax) if Labels==None, this is not used if seeds==None and labels==None, an ewxception is raised maxiter: int, optional, maximal number of iterations eps: float, optional, increase of inertia at which convergence is declared Returns ------- seeds: array of shape (p), the final seeds label : array of shape (self.V), the resulting field label J: float, inertia value """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if (seeds is None) and (label is None): raise ValueError('No initialization has been provided') k = np.size(seeds) inertia_old = NEGINF if seeds is None: k = label.max() + 1 if np.size(np.unique(label)) != k: raise ValueError('missing values, cannot proceed') seeds = np.zeros(k).astype(np.int_) for j in range(k): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] else: k = np.size(seeds) for i in range(maxiter): # voronoi labelling label = self.constrained_voronoi(seeds) # update the seeds inertia = 0 pinteria = 0 for j in range(k): lj = np.nonzero(label == j)[0] pinteria += np.sum( (self.field[seeds[j]] - self.field[lj]) ** 2) cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] inertia += np.sum((cent - self.field[lj]) ** 2) if verbose: print(i, inertia) if np.absolute(inertia_old - inertia) < eps: break inertia_old = inertia return seeds, label, inertia def ward(self, nbcluster): """Ward's clustering of self Parameters ---------- nbcluster: int, the number of desired clusters Returns ------- label: array of shape (self.V) the resulting field label J (float): the resulting inertia """ from nipy.algorithms.clustering.hierarchical_clustering import ward_segment label, J = ward_segment(self, self.field, qmax=nbcluster) # compute the resulting inertia inertia = 0 for j in range(nbcluster): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) inertia += np.sum((cent - self.field[lj]) ** 2) return label, inertia def copy(self): """ copy function """ return Field(self.V, self.edges.copy(), self.weights.copy(), self.field.copy()) def subfield(self, valid): """Returns a subfield of self, with only vertices such that valid > 0 Parameters ---------- valid: array of shape (self.V), nonzero for vertices to be retained Returns ------- F: Field instance, the desired subfield of self Notes ----- The vertices are renumbered as [1..p] where p = sum(valid>0) when sum(valid) == 0 then None is returned """ G = self.subgraph(valid) if G is None: return None field = self.field[valid] if len(G.edges) == 0: edges = np.array([[], []]).T else: edges = G.edges return Field(G.V, edges, G.weights, field) nipy-0.6.1/nipy/algorithms/graph/forest.py000066400000000000000000000350261470056100100206020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Module implements the Forest class A Forest is a graph with a hierarchical structure. Each connected component of a forest is a tree. 
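For instance (a minimal sketch; the parent array below is hypothetical)::

    import numpy as np
    from nipy.algorithms.graph.forest import Forest

    # node 0 is a root with children 1 and 2; node 3 is an isolated root
    f = Forest(4, np.array([0, 0, 0, 3]))
    f.isleaf()   # array([False,  True,  True,  True])
    f.isroot()   # array([ True, False, False,  True])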
The main characteristic is that each node has a single parent, so that a
Forest is fully characterized by a "parent" array, that defines the unique
parent of each node.

The directed relationships are encoded by the weight sign.

Note that some methods of the WeightedGraph class (e.g. dijkstra's algorithm)
require positive weights, so that they cannot work on forests in the current
implementation. Specific methods (e.g. all_distances()) have been set instead.

Main author: Bertrand Thirion, 2007-2011
"""

import numpy as np

from .graph import WeightedGraph


class Forest(WeightedGraph):
    """ Forest structure, i.e. a set of trees

    The nodes can be segmented into trees. Within each tree a node has one
    parent and children that describe the associated hierarchical structure.
    Some of the nodes can be viewed as leaves, others as roots.

    The edges within a tree are associated with a weight:

    * +1 from child to parent
    * -1 from parent to child

    Attributes
    ----------
    V : int
        int > 0, the number of vertices
    E : int
        the number of edges
    parents : (self.V,) array
        the parent array
    edges : (self.E, 2) array
        representing pairwise neighbors
    weights : (self.E,) array
        +1/-1 for ascending/descending links
    children : list
        list of arrays that represent the children of any node
    """

    def __init__(self, V, parents=None):
        """Constructor

        Parameters
        ----------
        V : int
            the number of vertices of the graph
        parents : None or (V,) array
            the parents of each vertex. If `parents` == None, the parents are
            set to range(V), i.e. each node is its own parent, and each node
            is a tree
        """
        V = int(V)
        if V < 1:
            raise ValueError('cannot create graphs with no vertex')
        self.V = int(V)

        # define the parents
        if parents is None:
            self.parents = np.arange(self.V).astype(np.int_)
        else:
            if np.size(parents) != V:
                raise ValueError('Incorrect size for parents')
            if parents.max() > self.V:
                raise ValueError('Incorrect value for parents')
            self.parents = np.reshape(parents, self.V).astype(np.int_)

        self.define_graph_attributes()

        if self.check() == 0:
            raise ValueError('The proposed structure is not a forest')
        self.children = []

    def define_graph_attributes(self):
        """Define the edge and weights array
        """
        self.edges = np.array([]).astype(np.int_)
        self.weights = np.array([])
        i = np.nonzero(self.parents != np.arange(self.V))[0]
        if np.size(i) > 0:
            E1 = np.hstack((i, self.parents[i]))
            E2 = np.hstack((self.parents[i], i))
            self.edges = (np.vstack((E1, E2))).astype(np.int_).T
            self.weights = np.hstack((np.ones(np.size(i)),
                                      - np.ones(np.size(i))))
        self.E = np.size(self.weights)

    def compute_children(self):
        """Define the children of each node (stored in self.children)
        """
        self.children = [np.array([]) for v in range(self.V)]
        if self.E > 0:
            K = self.copy()
            K.remove_edges(K.weights < 0)
            self.children = K.to_coo_matrix().tolil().rows.tolist()

    def get_children(self, v=-1):
        """ Get the children of a node/each node

        Parameters
        ----------
        v: int, optional
            a node index

        Returns
        -------
        children: list of int, the list of children of node v (if v is
                  provided); a list of lists of int, the children of all
                  nodes, otherwise
        """
        v = int(v)
        if v > -1:
            if v > self.V - 1:
                raise ValueError('the given node index is too high')
        if self.children == []:
            self.compute_children()
        if v == -1:
            return self.children
        else:
            return self.children[v]

    def get_descendants(self, v, exclude_self=False):
        """Returns the nodes that are children of v as a list

        Parameters
        ----------
        v: int, a node index

        Returns
        -------
        desc: list of int, the list of all descendants of the input node
        """
        v = int(v)
        if v < 0:
            raise ValueError('the given node index is too low')
        if v > self.V - 1:
            raise ValueError('the given node index is too high')
        if self.children == []:
            self.compute_children()
        if len(self.children[v]) == 0:
            return [v]
        else:
            desc = [v]
            for w in self.children[v]:
                desc.extend(self.get_descendants(w))
        desc.sort()
        if exclude_self and v in desc:
            desc = [i for i in desc if i != v]
        return desc

    def check(self):
        """Check that self is indeed a forest, i.e. contains no loop

        Returns
        -------
        a boolean b=0 iff there are loops, 1 otherwise

        Notes
        -----
        Slow implementation, might be rewritten in C or cython
        """
        b = 1
        if self.V == 1:
            return b
        for v in range(self.V):
            w = v
            q = 0
            while self.parents[w] != w:
                w = self.parents[w]
                if w == v:
                    b = 0
                    break
                q += 1
                if q > self.V:
                    b = 0
                    break
            if b == 0:
                break
        return b

    def isleaf(self):
        """ Identification of the leaves of the forest

        Returns
        -------
        leaves: bool array of shape(self.V), indicator of the forest's leaves
        """
        leaves = np.ones(self.V).astype('bool')
        if self.E > 0:
            leaves[self.edges[self.weights > 0, 1]] = 0
        return leaves

    def isroot(self):
        """ Returns an indicator of nodes being roots

        Returns
        -------
        roots, array of shape(self.V, bool), indicator of the forest's roots
        """
        roots = np.array(self.parents == np.arange(self.V))
        return roots

    def subforest(self, valid):
        """ Creates a subforest with the vertices for which valid > 0

        Parameters
        ----------
        valid: array of shape (self.V): indicator of the selected nodes

        Returns
        -------
        subforest: a new forest instance, with a reduced set of nodes

        Notes
        -----
        The children of deleted vertices become their own parent
        """
        if np.size(valid) != self.V:
            raise ValueError("incompatible size for self and valid")
        parents = self.parents.copy()
        j = np.nonzero(valid[self.parents] == 0)[0]
        parents[j] = j
        parents = parents[valid.astype(bool)]
        renumb = np.hstack((0, np.cumsum(valid)))
        parents = renumb[parents]
        F = Forest(np.sum(valid), parents)
        return F

    def merge_simple_branches(self):
        """ Return a subforest, where chained branches are collapsed

        Returns
        -------
        sf, Forest instance, same as self, without any chain
        """
        valid = np.ones(self.V).astype('bool')
        children = self.get_children()
        for k in range(self.V):
            if np.size(children[k]) == 1:
                valid[k] = 0
        return self.subforest(valid)

    def all_distances(self, seed=None):
        """Returns all the distances of the graph as a tree

        Parameters
        ----------
        seed=None array of shape(nbseed) with values in [0..self.V-1]
                  set of vertices from which the distances are computed

        Returns
        -------
        dg: array of shape(nseed, self.V), the resulting distances

        Notes
        -----
        By convention infinite distances are given the distance np.inf
        """
        if (hasattr(seed, '__iter__') == False) & (seed is not None):
            seed = [seed]
        if self.E > 0:
            w = self.weights.copy()
            self.weights = np.absolute(self.weights)
            dg = self.floyd(seed)
            dg[dg == (np.sum(self.weights) + 1)] = np.inf
            self.weights = w
            return dg
        else:
            return np.inf * np.ones((self.V, self.V))

    def depth_from_leaves(self):
        """Compute an index for each node: 0 for the leaves, 1 for their
        parents etc. and maximal for the roots.
Returns ------- depth: array of shape (self.V): the depth values of the vertices """ depth = self.isleaf().astype(np.int_)-1 for j in range(self.V): dc = depth.copy() for i in range(self.V): if self.parents[i] != i: depth[self.parents[i]] = np.maximum(depth[i] + 1,\ depth[self.parents[i]]) if dc.max() == depth.max(): break return depth def reorder_from_leaves_to_roots(self): """reorder the tree so that the leaves come first then their parents and so on, and the roots are last. Returns ------- order: array of shape(self.V) the order of the old vertices in the reordered graph """ depth = self.depth_from_leaves() order = np.argsort(depth) iorder = np.arange(self.V) for i in range(self.V): iorder[order[i]] = i parents = iorder[self.parents[order]] self.parents = parents self.define_graph_attributes() return order def leaves_of_a_subtree(self, ids, custom=False): """tests whether the given nodes are the leaves of a certain subtree Parameters ---------- ids: array of shape (n) that takes values in [0..self.V-1] custom == False, boolean if custom==true the behavior of the function is more specific - the different connected components are considered as being in a same greater tree - when a node has more than two subbranches, any subset of these children is considered as a subtree """ leaves = self.isleaf().astype('bool') for i in ids: if leaves[i] == 0: raise ValueError("some of the ids are not leaves") #1. find the highest node that is a common ancestor to all leaves # if there is none, common ancestor is -1 com_ancestor = ids[0] for i in ids: ca = i dca = self.get_descendants(ca) while com_ancestor not in dca: ca = self.parents[ca] dca = self.get_descendants(ca) if (ca == self.parents[ca]) & (com_ancestor not in dca): ca = -1 break com_ancestor = ca #2. check whether all the children of this ancestor are within ids if com_ancestor > -1: st = self.get_descendants(com_ancestor) valid = [i in ids for i in st if leaves[i]] bresult = (np.sum(valid) == np.size(valid)) if custom == False: return bresult # now, custom =True # check that subtrees of ancestor are consistently labelled kids = self.get_children(com_ancestor) if np.size(kids) > 2: bresult = True for v in kids: st = np.array(self.get_descendants(v)) st = st[leaves[st]] if np.size(st) > 1: valid = [i in ids for i in st] bresult *= ((np.sum(valid) == np.size(valid)) + np.sum(valid == 0)) return bresult # now, common ancestor is -1 if custom == False: st = np.squeeze(np.nonzero(leaves)) valid = [i in ids for i in st] bresult = (np.sum(valid) == np.size(valid)) else: cc = self.cc() bresult = True for i in ids: st = np.squeeze(np.nonzero((cc == cc[i]) * leaves)) if np.size(st) > 1: valid = [i in ids for i in st] bresult *= (np.sum(valid) == np.size(valid)) else: bresult *= (st in ids) return bresult def tree_depth(self): """ Returns the number of hierarchical levels in the tree """ depth = self.depth_from_leaves() return depth.max() + 1 def propagate_upward_and(self, prop): """propagates from leaves to roots some binary property of the nodes so that prop[parents] = logical_and(prop[children]) Parameters ---------- prop, array of shape(self.V), the input property Returns ------- prop, array of shape(self.V), the output property field """ prop = np.asanyarray(prop).copy() if np.size(prop) != self.V: raise ValueError("incoherent size for prop") prop[self.isleaf() == False] = True for j in range(self.tree_depth()): for i in range(self.V): if prop[i] == False: prop[self.parents[i]] = False return prop def propagate_upward(self, label): """ Propagation 
of a certain labelling from leaves to roots Assuming that label is a certain positive integer field this propagates these labels to the parents whenever the children nodes have coherent properties otherwise the parent value is unchanged Parameters ---------- label: array of shape(self.V) Returns ------- label: array of shape(self.V) """ label = np.asanyarray(label).copy() if np.size(label) != self.V: raise ValueError("incoherent size for label") ch = self.get_children() depth = self.depth_from_leaves() for j in range(1, depth.max() + 1): for i in range(self.V): if depth[i] == j: if np.size(np.unique(label[ch[i]])) == 1: label[i] = np.unique(label[ch[i]]) return label nipy-0.6.1/nipy/algorithms/graph/graph.py000066400000000000000000001143761470056100100204070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements two graph classes: Graph: basic topological graph, i.e. vertices and edges. This kind of object only has topological properties WeightedGraph (Graph): also has a value associated with edges, called weights, that are used in some computational procedures (e.g. path length computation). Importantly these objects are equivalent to square sparse matrices, which is used to perform certain computations. This module also provides several functions to instantiate WeightedGraphs from data: - k nearest neighbours (where samples are rows of a 2D-array) - epsilon-neighbors (where sample rows of a 2D-array) - representation of the neighbors on a 3d grid (6-, 18- and 26-neighbors) - Minimum Spanning Tree (where samples are rows of a 2D-array) Author: Bertrand Thirion, 2006--2011 """ import numpy as np from scipy.sparse import coo_matrix class Graph: """ Basic topological (non-weighted) directed Graph class Member variables: * V (int > 0): the number of vertices * E (int >= 0): the number of edges Properties: * vertices (list, type=int, shape=(V,)) vertices id * edges (list, type=int, shape=(E,2)): edges as vertices id tuples """ ### Constructor def __init__(self, V, E=0, edges=None): """ Constructor Parameters ---------- V : int the number of vertices E : int, optional the number of edges edges : None or shape (E, 2) array, optional edges of graph """ # deal with vertices self.__set_V(V) self.vertices = np.arange(self.V) # deal with edges if not isinstance(edges, None.__class__): self.__set_E(np.shape(edges)[0]) self.set_edges(edges) else: self.__set_E(E) self.set_edges(np.zeros((self.E, 2), dtype=int)) ### Accessors def get_vertices(self): """ To get the graph's vertices (as id) """ return self.vertices def get_edges(self): """To get the graph's edges """ try: temp = self.edges except: temp = [] return temp def get_V(self): """To get the number of vertices in the graph """ return self.V def get_E(self): """To get the number of edges in the graph """ return self.E ### Mutators def __set_V(self, V): """ Sets the graph's number of vertices. This methods is defined as private since we don't want the number of vertices to be modified outside the graph object methods. """ self.V = int(V) if self.V < 1: raise ValueError('Empty graphs cannot be created') def __set_E(self, E): """Sets the graph's number of edges. This methods is defined as private since we don't want the number of edges to be modified outside the graph object methods. 
""" self.E = int(E) if self.E < 0: self.E = 0 def set_edges(self, edges): """Sets the graph's edges Preconditions: * edges has a correct size * edges take values in [1..V] """ if (not isinstance(edges, None.__class__) and (edges.size != 0)): if ((np.shape(edges)[0] != self.E) or (np.shape(edges)[1] != 2)): raise ValueError('Incompatible size of the edge matrix') if edges.max() + 1 > self.V: raise ValueError('Incorrect edge specification') self.edges = edges else: self.edges = [] ### Methods def adjacency(self): """returns the adjacency matrix of the graph as a sparse coo matrix Returns ------- adj: scipy.sparse matrix instance, that encodes the adjacency matrix of self """ if self.E > 0: i = self.edges[:, 0] j = self.edges[:, 1] adj = coo_matrix((np.ones(self.E), (i, j)), shape=(self.V, self.V)) else: adj = coo_matrix((self.V, self.V)) return adj def cc(self): """Compte the different connected components of the graph. Returns ------- label: array of shape(self.V), labelling of the vertices """ try: from scipy.sparse import cs_graph_components _, label = cs_graph_components(self.adjacency()) except: pass lil = self.to_coo_matrix().tolil().rows.tolist() label = lil_cc(lil) return label def degrees(self): """Returns the degree of the graph vertices. Returns ------- rdegree: (array, type=int, shape=(self.V,)), the right degrees ldegree: (array, type=int, shape=(self.V,)), the left degrees """ A = self.adjacency() A.data = np.ones(A.nnz) right = np.array(A.sum(1)).ravel() left = np.array(A.sum(0)).ravel() return right, left def main_cc(self): """Returns the indexes of the vertices within the main cc Returns ------- idx: array of shape (sizeof main cc) """ if self.E > 0: cc = self.cc() pop = np.array([np.sum(cc == k) for k in np.unique(cc)]) idx = np.nonzero(cc == pop.argmax())[0] else: idx = 0 return idx def to_coo_matrix(self): """ Return adjacency matrix as coo sparse Returns ------- sp: scipy.sparse matrix instance, that encodes the adjacency matrix of self """ if self.E > 0: i, j = self.edges.T sm = coo_matrix((np.ones(self.E), (i, j)), shape=(self.V, self.V)) else: sm = coo_matrix((self.V, self.V)) return sm def show(self, ax=None): """Shows the graph as a planar one. 
Parameters ---------- ax, axis handle Returns ------- ax, axis handle """ import matplotlib.pyplot as plt if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) t = (2 * np.pi * np.arange(self.V)) / self.V plt.plot(np.cos(t), np.sin(t), '.') planar_edges = np.ravel((self.edges * 2 * np.pi) / self.V) ax.plot(np.cos(planar_edges), np.sin(planar_edges), 'k') ax.axis('off') return ax ##################################################################### # WeightedGraph ##################################################################### def wgraph_from_coo_matrix(x): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: scipy.sparse.coo_matrix instance, the input matrix Returns ------- wg: WeightedGraph instance """ if x.shape[0] != x.shape[1]: raise ValueError("the input coo_matrix is not square") i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data wg = WeightedGraph(x.shape[0], edges, weights) return wg def wgraph_from_adjacency(x): """Instantiates a weighted graph from a square 2D array Parameters ---------- x: 2D array instance, the input array Returns ------- wg: WeightedGraph instance """ a = coo_matrix(x) return wgraph_from_coo_matrix(a) def complete_graph(n): """ returns a complete graph with n vertices """ return wgraph_from_adjacency(np.ones((n, n))) def mst(X): """ Returns the WeightedGraph that is the minimum Spanning Tree of X Parameters ---------- X: data array, of shape(n_samples, n_features) Returns ------- the corresponding WeightedGraph instance """ n = X.shape[0] label = np.arange(n).astype(np.intp) edges = np.zeros((0, 2)).astype(np.intp) # upper bound on maxdist**2 maxdist = 4 * np.sum((X - X[0]) ** 2, 1).max() nbcc = n while nbcc > 1: mindist = maxdist * np.ones(nbcc) link = - np.ones((nbcc, 2)).astype(np.intp) # find nearest neighbors for n1 in range(n): j = label[n1] newdist = np.sum((X[n1] - X) ** 2, 1) newdist[label == j] = maxdist n2 = np.argmin(newdist) if newdist[n2] < mindist[j]: mindist[j] = newdist[n2] link[j] = np.array([n1, n2]) # merge nearest neighbors nnbcc = nbcc idx = np.arange(nbcc) for i in range(nnbcc): k, j = label[link[i]] while k > idx[k]: k = idx[k] while j > idx[j]: j = idx[j] if k != j: edges = np.vstack((edges, link[i], np.array([link[i, 1], link[i, 0]]))) idx[max(j, k)] = min(j, k) nbcc -= 1 # relabel the graph label = WeightedGraph(n, edges, np.ones(edges.shape[0])).cc() nbcc = label.max() + 1 d = np.sqrt(np.sum((X[edges[:, 0]] - X[edges[:, 1]]) ** 2, 1)) return WeightedGraph(n, edges, d) def knn(X, k=1): """returns the k-nearest-neighbours graph of the data Parameters ---------- X, array of shape (n_samples, n_features): the input data k, int, optional: is the number of neighbours considered Returns ------- the corresponding WeightedGraph instance Notes ----- The knn system is symmetrized: if (ab) is one of the edges then (ba) is also included """ from ..utils.fast_distance import euclidean_distance if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) try: k = int(k) except: "k cannot be cast to an int" if np.isnan(k): raise ValueError('k is nan') if np.isinf(k): raise ValueError('k is inf') k = min(k, X.shape[0] - 1) # create the distance matrix dist = euclidean_distance(X) sorted_dist = dist.copy() sorted_dist.sort(0) # neighbour system bool_knn = dist < sorted_dist[k + 1] bool_knn += bool_knn.T # xor diagonal bool_knn ^= np.diag(np.diag(bool_knn)) dist *= (bool_knn > 0) return wgraph_from_adjacency(dist) def eps_nn(X, eps=1.): """Returns the eps-nearest-neighbours graph of the data 
Parameters ---------- X, array of shape (n_samples, n_features), input data eps, float, optional: the neighborhood width Returns ------- the resulting graph instance """ from ..utils.fast_distance import euclidean_distance if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) try: eps = float(eps) except: "eps cannot be cast to a float" if np.isnan(eps): raise ValueError('eps is nan') if np.isinf(eps): raise ValueError('eps is inf') dist = euclidean_distance(X) dist = np.maximum(dist, 1.e-16) dist[dist >= eps] = 0 # this would is just for numerical reasons dist -= np.diag(np.diag(dist)) return wgraph_from_adjacency(dist) def lil_cc(lil): """ Returns the connected components of a graph represented as a list of lists Parameters ---------- lil: a list of list representing the graph neighbors Returns ------- label a vector of shape len(lil): connected components labelling Notes ----- Dramatically slow for non-sparse graphs """ n = len(lil) visited = np.zeros(n).astype(np.intp) label = - np.ones(n).astype(np.intp) k = 0 while (visited == 0).any(): front = [np.argmin(visited)] while len(front) > 0: pivot = front.pop(0) if visited[pivot] == 0: visited[pivot] = 1 label[pivot] = k front += lil[pivot] k += 1 return label def graph_3d_grid(xyz, k=18): """ Utility that computes the six neighbors on a 3d grid Parameters ---------- xyz: array of shape (n_samples, 3); grid coordinates of the points k: neighboring system, equal to 6, 18, or 26 Returns ------- i, j, d 3 arrays of shape (E), where E is the number of edges in the resulting graph (i, j) represent the edges, d their weights """ if np.size(xyz) == 0: return None lxyz = xyz - xyz.min(0) m = 3 * lxyz.max(0).sum() + 2 # six neighbours n6 = [np.array([1, m, m ** 2]), np.array([m ** 2, 1, m]), np.array([m, m ** 2, 1])] # eighteen neighbours n18 = [np.array([1 + m, 1 - m, m ** 2]), np.array([1 + m, m - 1, m ** 2]), np.array([m ** 2, 1 + m, 1 - m]), np.array([m ** 2, 1 + m, m - 1]), np.array([1 - m, m ** 2, 1 + m]), np.array([m - 1, m ** 2, 1 + m])] # twenty-six neighbours n26 = [np.array([1 + m + m ** 2, 1 - m, 1 - m ** 2]), np.array([1 + m + m ** 2, m - 1, 1 - m ** 2]), np.array([1 + m + m ** 2, 1 - m, m ** 2 - 1]), np.array([1 + m + m ** 2, m - 1, m ** 2 - 1])] # compute the edges in each possible direction def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]), weights=np.array([])): q = 0 for nn_row in nn: v1 = np.dot(lxyz, nn_row) o1 = np.argsort(v1) sv1 = v1[o1] nz = np.squeeze(np.nonzero(sv1[: - 1] - sv1[1:] == - l1dist)) o1z, o1z1 = o1[nz], o1[nz + 1] left = np.hstack((left, o1z, o1z1)) right = np.hstack((right, o1z1, o1z)) q += 2 * np.size(nz) weights = np.hstack((weights, np.sqrt(l1dist) * np.ones(q))) return left, right, weights i, j, d = create_edges(lxyz, n6, 1.) if k >= 18: i, j, d = create_edges(lxyz, n18, 2, i, j, d) if k == 26: i, j, d = create_edges(lxyz, n26, 3, i, j, d) i, j = i.astype(np.intp), j.astype(np.intp) # reorder the edges to have a more standard order order = np.argsort(i + j * (len(i) + 1)) i, j, d = i[order], j[order], d[order] return i, j, d def wgraph_from_3d_grid(xyz, k=18): """Create graph as the set of topological neighbours of the three-dimensional coordinates set xyz, in the k-connectivity scheme Parameters ---------- xyz: array of shape (nsamples, 3) and type np.intp, k = 18: the number of neighbours considered. 
(6, 18 or 26) Returns ------- the WeightedGraph instance """ if xyz.shape[1] != 3: raise ValueError('xyz should have shape n * 3') if k not in [6, 18, 26]: raise ValueError('k should be equal to 6, 18 or 26') i, j, d = graph_3d_grid(xyz, k) edges = np.vstack((i, j)).T return WeightedGraph(xyz.shape[0], edges, d) def concatenate_graphs(G1, G2): """Returns the concatenation of the graphs G1 and G2 It is thus assumed that the vertices of G1 and G2 represent disjoint sets Parameters ---------- G1, G2: the two WeightedGraph instances to be concatenated Returns ------- G, WeightedGraph, the concatenated graph Notes ----- This implies that the vertices of G corresponding to G2 are labeled [G1.V .. G1.V + G2.V - 1] """ V = G1.V + G2.V edges = np.vstack((G1.edges, G1.V + G2.edges)) weights = np.hstack((G1.weights, G2.weights)) G = WeightedGraph(V, edges, weights) return G class WeightedGraph(Graph): """Basic weighted, directed graph class Member variables: * V (int): the number of vertices * E (int): the number of edges * vertices ((V,) array of int): vertex ids * edges ((E, 2) array of int): edges as pairs of vertex ids * weights ((E,) array): weights / lengths of the graph's edges """ ### Constructor def __init__(self, V, edges=None, weights=None): """ Constructor Parameters ---------- V : int (int > 0) the number of vertices edges : (E, 2) array, type int edges of the graph weights : (E,) array weights/lengths of the edges """ Graph.__init__(self, V, edges=edges) if weights is None: new_weights = [] else: new_weights = weights self.set_weights(new_weights) def set_weights(self, weights): """ Set edge weights Parameters ---------- weights: array of shape (self.E,): edge weights """ if np.size(weights) != self.E: raise ValueError('The weight size does not match the number of edges') else: self.weights = np.reshape(weights, (self.E)) def get_weights(self): return self.weights def from_3d_grid(self, xyz, k=18): """Sets the graph to be the topological neighbours graph of the three-dimensional coordinates set xyz, in the k-connectivity scheme Parameters ---------- xyz: array of shape (self.V, 3) and type np.intp, k = 18: the number of neighbours considered. (6, 18 or 26) Returns ------- E(int): the number of edges of self """ if xyz.shape[0] != self.V: raise ValueError('xyz should have shape n * 3, with n = self.V') if xyz.shape[1] != 3: raise ValueError('xyz should have shape n * 3') graph = graph_3d_grid(xyz, k) if graph is not None: i, j, d = graph else: raise TypeError('Creating graph from grid failed. '\ 'Maybe the grid is too big') self.E = np.size(i) self.edges = np.zeros((self.E, 2), np.intp) self.edges[:, 0] = i self.edges[:, 1] = j self.weights = np.array(d) return self.E def cut_redundancies(self): """ Returns a graph with redundant edges removed: each edge (a, b) is present only once in the edge matrix; the corresponding weights are added. Returns ------- the resulting WeightedGraph """ A = self.to_coo_matrix().tocsr().tocoo() return wgraph_from_coo_matrix(A) def dijkstra(self, seed=0): """ Returns all the graph geodesic distances starting from seed Parameters ---------- seed (int, >-1, for each vertex a, sum{edge[e, 0]=a} D[e]=1 c == 1 => for each vertex b, sum{edge[e, 1]=b} D[e]=1 c == 2 => symmetric ('l2') normalization Notes ----- Note that when sum_{edge[e, .]
== a } D[e] = 0, nothing is performed """ from scipy.sparse import dia_matrix c = int(c) if c not in [0, 1, 2]: raise ValueError('c must be equal to 0, 1 or 2') if self.E == 0: if c < 2: return np.zeros(self.V) else: return np.zeros(self.V), np.zeros(self.V) adj = self.to_coo_matrix().tocsr() s1 = adj.sum(0) s2 = adj.sum(1) if c == 1: s = dia_matrix((1. / s1, 0), shape=(self.V, self.V)) adj = adj * s self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s1) if c == 0: s = dia_matrix((1. / s2.T, 0), shape=(self.V, self.V)) adj = s * adj self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s2) if c == 2: s1 = dia_matrix((1. / np.sqrt(s1), 0), shape=(self.V, self.V)) s2 = dia_matrix((1. / np.sqrt(adj.sum(1)), 0), shape=(self.V, self.V)) adj = (s1 * adj) * s2 self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s1), np.asarray(s2) def set_euclidian(self, X): """ Compute the weights of the graph as the distances between the corresponding rows of X, which represents an embedding of self Parameters ---------- X array of shape (self.V, edim), the coordinate matrix of the embedding """ if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) if X.shape[0] != self.V: raise ValueError('X.shape[0] != self.V') if self.E > 0: d = np.sum((X[self.edges[:, 0]] - X[self.edges[:, 1]]) ** 2, 1) self.weights = np.sqrt(d) def set_gaussian(self, X, sigma=0): """ Compute the weights of the graph as a gaussian function of the distance between the corresponding rows of X, which represents an embedding of self Parameters ---------- X array of shape (self.V, dim) the coordinate matrix of the embedding sigma=0, float: the parameter of the gaussian function Notes ----- When sigma == 0, the following value is used: ``sigma = sqrt(mean(||X[self.edges[:, 0], :]-X[self.edges[:, 1], :]||^2))`` """ sigma = float(sigma) if sigma < 0: raise ValueError('sigma should be positive') self.set_euclidian(X) d = self.weights if sigma == 0: sigma = (d ** 2).mean() w = np.exp(- (d ** 2) / (2 * sigma)) self.weights = w def symmeterize(self): """Symmeterize self, modify edges and weights so that self.adjacency becomes the symmetric part of the current self.adjacency. """ A = self.to_coo_matrix() symg = wgraph_from_adjacency((A + A.T) / 2) self.E = symg.E self.edges = symg.edges self.weights = symg.weights return self def anti_symmeterize(self): """anti-symmeterize self, i.e. 
produces the graph whose adjacency matrix would be the antisymmetric part of its current adjacency matrix """ A = self.to_coo_matrix() symg = wgraph_from_adjacency((A - A.T) / 2) self.E = symg.E self.edges = symg.edges self.weights = symg.weights return self.E def voronoi_labelling(self, seed): """ Performs a voronoi labelling of the graph Parameters ---------- seed: array of shape (nseeds), type (np.intp), vertices from which the cells are built Returns ------- labels: array of shape (self.V) the labelling of the vertices """ import heapq if not hasattr(seed, '__iter__'): seed = [seed] try: if (self.weights < 0).any(): raise ValueError('some weights are negative') except ValueError: raise except Exception: raise ValueError('undefined weights') dist, active = np.inf * np.ones(self.V), np.ones(self.V) label = - np.ones(self.V, np.intp) idx, neighb, weight = self.compact_neighb() dist[seed] = 0 label[seed] = np.arange(len(seed)) dg = list(zip(np.zeros_like(seed), seed)) heapq.heapify(dg) for j in range(self.V): end = False while True: if len(dg) == 0: end = True break node = heapq.heappop(dg) if active[node[1]]: break if end: break dwin, win = node active[win] = False # the following loop might be vectorized for i in range(idx[win], idx[win + 1]): l, newdist = neighb[i], dwin + weight[i] if newdist < dist[l]: heapq.heappush(dg, (newdist, l)) dist[l] = newdist label[l] = label[win] return label def cliques(self): """ Extraction of the graph cliques these are defined using replicator dynamics equations Returns ------- cliques: array of shape (self.V), type (np.intp) labelling of the vertices according to the clique they belong to """ if (self.weights < 0).any(): raise ValueError('cliques definition requires a positive graph') cliques, size = - np.ones(self.V), np.zeros(self.V) adj = self.to_coo_matrix() for k in range(self.V): u = cliques < 0 w = np.zeros_like(u) # replicator dynamics iterations for q in range(self.V): w = u.copy() u = (adj * u) * w if u.sum() == 0: break u /= u.sum() if ((w - u) ** 2).sum() < 1.e-12: break # threshold the result threshold = 1. / max(2., 1. * np.sum(cliques == - 1)) cliques[u > threshold] = k if np.sum(u > threshold) == 0: break size[k] = np.sum(u > threshold) if cliques.min() > - 1: break # sort the labels size = size[size > 0] order = np.argsort(- size) label = cliques.copy() for k, vv in enumerate(order): cliques[label == vv] = k return cliques def remove_trivial_edges(self): """ Removes trivial edges, i.e.
edges that are (v, v)-like; self.weights and self.E are corrected accordingly Returns ------- self.E (int): The number of edges """ if self.E > 0: valid = self.edges[:, 0] != self.edges[:, 1] self.edges = self.edges[valid] self.weights = self.weights[valid] self.E = np.sum(valid) return self.E def subgraph(self, valid): """ Creates a subgraph with the vertices for which valid > 0 and with the corresponding set of edges Parameters ---------- valid, array of shape (self.V): nonzero for vertices to be retained Returns ------- G, WeightedGraph instance, the desired subgraph of self Notes ----- The vertices are renumbered as [0 .. p - 1] where p = sum(valid > 0); when sum(valid > 0) == 0, None is returned """ if np.size(valid) != self.V: raise ValueError("incompatible size for self and valid") if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = (valid[self.edges]).min(1) > 0 edges = self.edges[win_edges] weights = self.weights[win_edges] renumb = np.hstack((0, np.cumsum(valid > 0))) edges = renumb[edges] G = WeightedGraph(np.sum(valid > 0), edges, weights) else: G = WeightedGraph(np.sum(valid > 0)) return G def kruskal(self): """ Creates the Minimum Spanning Tree of self using Kruskal's algorithm. Efficient if self is sparse Returns ------- K, WeightedGraph instance: the resulting MST Notes ----- If self contains several connected components, the output will keep the same number k of connected components """ k = self.cc().max() + 1 E = 2 * self.V - 2 V = self.V Kedges = np.zeros((E, 2)).astype(np.intp) Kweights = np.zeros(E) iw = np.argsort(self.weights) label = np.arange(V) j = 0 for i in range(V - k): a, b = self.edges[iw[j]] d = self.weights[iw[j]] while label[a] == label[b]: j = j + 1 a, b = self.edges[iw[j]] d = self.weights[iw[j]] if label[a] != label[b]: lb = label[b] label[label == lb] = label[a] Kedges[2 * i] = np.array([a, b]) Kedges[2 * i + 1] = np.array([b, a]) Kweights[2 * i: 2 * i + 2] = d K = WeightedGraph(V, Kedges, Kweights) return K def voronoi_diagram(self, seeds, samples): """ Defines the graph as the Voronoi diagram (VD) that links the seeds. The VD is defined using the sample points. Parameters ---------- seeds: array of shape (self.V, dim) samples: array of shape (nsamples, dim) Notes ----- By default, the weights are a Gaussian function of the distance The implementation is not optimal """ from .bipartite_graph import cross_knn # checks if seeds.shape[0] != self.V: raise ValueError("The number of seeds is not as expected") if np.size(seeds) == self.V: seeds = np.reshape(seeds, (np.size(seeds), 1)) if np.size(samples) == samples.shape[0]: samples = np.reshape(samples, (np.size(samples), 1)) if seeds.shape[1] != samples.shape[1]: raise ValueError("The seeds and samples do not belong \ to the same space") # 1. define the graph via cross_knn(samples, seeds, 2) j = cross_knn(samples, seeds, 2).edges[:, 1] # 2. put all the pairs in the target graph Ns = np.shape(samples)[0] self.E = Ns self.edges = np.array( [j[2 * np.arange(Ns)], j[2 * np.arange(Ns) + 1]]).T self.weights = np.ones(self.E) # 3. eliminate the redundancies and set the weights self.cut_redundancies() self.symmeterize() self.set_gaussian(seeds) def show(self, X=None, ax=None): """ Plots the current graph in 2D Parameters ---------- X : None or array of shape (self.V, 2) a set of coordinates that can be used to embed the vertices in 2D. If X.shape[1] > 2, a svd reduces X for display.
By default, the graph is presented on a circle ax: None or axis handle, optional Returns ------- ax: axis handle Notes ----- This should be used only for small graphs. """ if np.size(self.weights) == 0: return Graph.show(self, ax) wm = self.weights.max() import matplotlib.pyplot as plt if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) ml = 5. if X is None: for e in range(self.E): A = (self.edges[e, 0] * 2 * np.pi) / self.V B = (self.edges[e, 1] * 2 * np.pi) / self.V C = max(1, int(self.weights[e] * ml / wm)) plt.plot([np.cos(A), np.cos(B)], [np.sin(A), np.sin(B)], 'k', linewidth=C) t = (2 * np.pi * np.arange(self.V)) / self.V plt.plot(np.cos(t), np.sin(t), 'o', linewidth=ml) plt.axis([-1.1, 1.1, -1.1, 1.1]) return ax if X.shape[0] != self.V: raise ValueError('X.shape[0] != self.V') if np.size(X) == self.V: X = np.reshape(X, (self.V, 1)) if X.shape[1] == 1: # plot the graph on a circle x = np.pi * (X - X.min()) / (X.max() - X.min()) for e in range(self.E): A = x[self.edges[e, 0]] B = x[self.edges[e, 1]] C = max(1, int(self.weights[e] * ml / wm)) plt.plot([np.cos(A), np.cos(B)], [np.sin(A), np.sin(B)], 'k', linewidth=C) plt.plot(np.cos(x), np.sin(x), 'o', linewidth=ml) plt.axis([-1.1, 1.1, -0.1, 1.1]) if X.shape[1] > 2: Y = X.copy() from numpy.linalg import svd M1, M2, M3 = svd(Y, 0) Y = np.dot(M1, np.diag(M2)) Y = Y[:, :2] if X.shape[1] < 3: Y = X if Y.shape[1] == 2: for e in range(self.E): A = self.edges[e, 0] B = self.edges[e, 1] C = max(1, int(self.weights[e] * ml / wm)) plt.plot([Y[A, 0], Y[B, 0]], [Y[A, 1], Y[B, 1]], 'k', linewidth=C) plt.plot(Y[:, 0], Y[:, 1], 'o', linewidth=ml) xmin, xmax = Y[:, 0].min(), Y[:, 0].max() ymin, ymax = Y[:, 1].min(), Y[:, 1].max() xmin = 1.1 * xmin - 0.1 * xmax xmax = 1.1 * xmax - 0.1 * xmin ymin = 1.1 * ymin - 0.1 * ymax ymax = 1.1 * ymax - 0.1 * ymin plt.axis([xmin, xmax, ymin, ymax]) return ax def remove_edges(self, valid): """ Removes all the edges for which valid == 0 Parameters ---------- valid : (self.E,) array """ if np.size(valid) != self.E: raise ValueError("the input vector does not have the correct size") valid = np.reshape(valid, np.size(valid)) self.E = int(valid.sum()) self.edges = self.edges[valid != 0] self.weights = self.weights[valid != 0] def list_of_neighbors(self): """ returns the set of neighbors of self as a list of arrays """ return self.to_coo_matrix().tolil().rows.tolist() def copy(self): """ returns a copy of self """ G = WeightedGraph(self.V, self.edges.copy(), self.weights.copy()) return G def left_incidence(self): """ Return left incidence matrix Returns ------- left_incid: list the left incidence matrix of self as a list of lists: i.e. the list [[e.0.0, .., e.0.i(0)], .., [e.V.0, .., e.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[0] = i """ linc = [[] for i in range(self.V)] for e in range(self.E): i = self.edges[e, 0] a = linc[i] a.append(e) return linc def right_incidence(self): """ Return right incidence matrix Returns ------- right_incid: list the right incidence matrix of self as a list of lists: i.e.
the list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[1] = i """ rinc = [[] for i in range(self.V)] for e in range(self.E): i = self.edges[e, 1] a = rinc[i] a.append(e) return rinc def is_connected(self): """ States whether self is connected or not """ if self.V < 1: raise ValueError("empty graph") if self.V < 2: return True if self.E == 0: return False cc = self.cc() return int(cc.max() == 0) def to_coo_matrix(self): """ Return adjacency matrix as coo sparse Returns ------- sp: scipy.sparse matrix instance that encodes the adjacency matrix of self """ if self.E > 0: i, j = self.edges.T sm = coo_matrix((self.weights, (i, j)), shape=(self.V, self.V)) else: sm = coo_matrix((self.V, self.V)) return sm nipy-0.6.1/nipy/algorithms/graph/meson.build000066400000000000000000000010221470056100100210550ustar00rootroot00000000000000target_dir = 'nipy/algorithms/graph' extensions = [ '_graph', ] foreach ext: extensions py.extension_module(ext, cython_gen.process(ext + '.pyx'), c_args: cython_c_args, include_directories: [incdir_numpy], install: true, subdir: target_dir ) endforeach python_sources = [ '__init__.py', 'bipartite_graph.py', 'field.py', 'forest.py', 'graph.py' ] py.install_sources( python_sources, pure: false, subdir: target_dir ) install_subdir('tests', install_dir: install_root / target_dir) nipy-0.6.1/nipy/algorithms/graph/tests/000077500000000000000000000000001470056100100200625ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/graph/tests/__init__.py000066400000000000000000000000501470056100100221660ustar00rootroot00000000000000# Init to make test directory a package nipy-0.6.1/nipy/algorithms/graph/tests/test_bipartite_graph.py000066400000000000000000000047561470056100100246530ustar00rootroot00000000000000 import numpy as np import numpy.random as nr from ..bipartite_graph import ( check_feature_matrices, cross_eps, cross_knn, ) def basicdata(): x = np.array( [[-1.998,-2.024], [-0.117,-1.010], [1.099,-0.057], [ 1.729,-0.252], [1.003,-0.021], [1.703,-0.739], [-0.557,1.382],[-1.200,-0.446],[-0.331,-0.256], [-0.800,-1.584]]) return x def test_feature_matrices(): """ test that feature matrices are correctly checked """ x, y = nr.rand(10, 1), nr.rand(12) check_feature_matrices(x, y) check_feature_matrices(y, x) check_feature_matrices(x, x) check_feature_matrices(y, y) def test_cross_knn_1(): """ test the construction of k-nn bipartite graph """ x = basicdata() G = cross_knn(x, x, 2) assert (G.E == 20) def test_cross_knn_2(): """ test the construction of k-nn bipartite graph """ x = basicdata() G = cross_knn(x, x, 1) assert (G.E == 10) def test_cross_eps_1(): """ test the construction of eps-nn bipartite graph """ x = basicdata() y = x + 0.1 * nr.randn(x.shape[0], x.shape[1]) G = cross_eps(x, y, 1.) 
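# cross_eps links each sample of x to the samples of y closer than eps == 1., so every retained weight must fall below that threshold (checked below)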
D = G.weights assert((D < 1).all()) def test_copy(): """ test that the weighted graph copy is OK """ x = basicdata() G = cross_knn(x, x, 2) K = G.copy() assert K.edges.shape == (20, 2) def test_subraph_left(): """ Extraction of the 'left subgraph' """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sl = g.subgraph_left(valid) assert sl.V == 7 assert sl.W == 10 assert sl.edges[:, 0].max() == 6 def test_subraph_left2(): """ Extraction of the 'left subgraph', without renumb=False """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sl = g.subgraph_left(valid, renumb=False) assert sl.V == 10 assert sl.W == 10 assert sl.edges[:, 0].max() == 6 def test_subraph_right(): """ Extraction of the 'right subgraph' """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sr = g.subgraph_right(valid) assert sr.W == 7 assert sr.V == 10 assert sr.edges[:, 1].max() == 6 def test_subraph_right2(): """ Extraction of the 'right subgraph', with renumb = False """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sr = g.subgraph_right(valid, renumb = False) assert sr.W == 10 assert sr.V == 10 assert sr.edges[:, 1].max() == 6 nipy-0.6.1/nipy/algorithms/graph/tests/test_field.py000066400000000000000000000172341470056100100225650ustar00rootroot00000000000000import numpy as np import numpy.random as nr from numpy.testing import assert_array_equal from ..field import field_from_coo_matrix_and_data, field_from_graph_and_data from ..graph import wgraph_from_3d_grid def basic_field(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = np.sum(xyz, 1).astype(np.float64) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return myfield def basic_field_random(nx=10, ny=10, nz=1): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = 0.5 * nr.randn(nx * ny * nz, 1) + np.sum(xyz, 1).astype(np.float64) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return myfield def basic_field_2(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T toto = xyz - np.array([5, 5, 5]) data = np.sum(toto ** 2, 1) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return myfield def basic_field_3(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T toto = xyz - np.array([5, 5, 5]) data = np.abs(np.sum(toto ** 2, 1) - 11 ) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return myfield def basic_graph(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = np.zeros(xyz.shape[0]) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return myfield def test_type_local_max(): f = basic_field() f.field = f.field.astype(np.float32) idx, depth = f.get_local_maxima(th=0) assert_array_equal(idx, np.array([999])) def test_max_1(): myfield = basic_field() myfield.field[555] = 30 depth = myfield.local_maxima() dep = np.zeros(1000, np.int_) dep[555] = 5 dep[999] = 3 assert sum(np.absolute(dep-depth)) < 1.e-7 def test_max_2(): myfield = basic_field() myfield.field[555] = 28 idx, depth = myfield.get_local_maxima() assert len(idx) == 2 assert_array_equal(idx, np.array([555, 999])) assert_array_equal(depth, np.array([5, 3])) def test_max_3(): myfield = basic_field() myfield.field[555] = 27 idx, depth = myfield.get_local_maxima() assert np.size(idx) == 2 assert idx[0] == 555 assert idx[1] == 999 assert depth[0] == 5 assert depth[1] == 5 def 
test_max_4(): myfield = basic_field() myfield.field[555] = 28 idx, depth = myfield.get_local_maxima(0, 27.5) assert np.size(idx) == 1 assert idx[0] == 555 assert depth[0] == 1 def test_smooth_1(): G = basic_graph() field = np.zeros((1000,1)) field[555,0] = 1 G.set_field(field) G.diffusion() sfield = G.get_field() assert sfield[555] == 0 assert sfield[554] == 1 assert np.abs(sfield[566] - np.sqrt(2)) < 1.e-7 assert np.abs(sfield[446] - np.sqrt(3)) < 1.e-7 def test_smooth_2(): G = basic_graph() field = np.zeros((1000, 1)) field[555, 0] = 1 G.set_field(field) G.diffusion(1) sfield = G.get_field() assert sfield[555] == 0 assert sfield[554] == 1 assert np.abs(sfield[566] - np.sqrt(2)) < 1.e-7 assert np.abs(sfield[446] - np.sqrt(3)) < 1.e-7 def test_dilation(): myfield = basic_field() myfield.field[555] = 30 myfield.field[664] = 0 myfield.dilation(2) assert myfield.field[737] == 30 assert myfield.field[0] == 6 assert myfield.field[999] == 27 assert myfield.field[664] == 30 def test_dilation2(): # test equality of cython and python versions myfield = basic_field() myfield.field[555] = 30 myfield.field[664] = 0 h = myfield.copy() h.dilation(2) g = myfield.copy() g.dilation(2, False) assert_array_equal(h.field, g.field) def test_erosion(): myfield = basic_field() myfield.field[555] = 30 myfield.field[664] = 0 myfield.erosion(2) field = myfield.get_field() assert field[737] == 11 assert field[0] == 0 assert field[999] == 21 assert field[664] == 0 def test_opening(): myfield = basic_field() myfield.field[555] = 30 myfield.field[664] = 0 myfield.opening(2) field = myfield.get_field() assert field[737] == 17 assert field[0] == 0 assert field[999] == 21 assert field[555] == 16 def test_closing(): myfield = basic_field() myfield.field[555] = 30 myfield.field[664] = 0 myfield.closing(2) field = myfield.get_field() assert field[737] == 17 assert field[0] == 6 assert field[999] == 27 assert field[555] == 30 def test_watershed_1(): myfield = basic_field() myfield.field[555] = 28 myfield.field[664] = 0 idx, label = myfield.custom_watershed() assert np.size(idx) == 2 assert tuple(idx) == (555, 999) assert (label[776], label[666], label[123]) == (1, 0, 0) def test_watershed_4(): myfield = basic_field_3() idx, label = myfield.custom_watershed() assert np.size(idx) == 9 assert np.unique( [label[555], label[0], label[9], label[90], label[99], label[900], label[909], label[990], label[999]]).size == 9 def test_watershed_2(): myfield = basic_field_2() myfield.field[555] = 10 myfield.field[664] = 0 idx, label = myfield.custom_watershed() assert np.size(idx) == 9 def test_watershed_3(): myfield = basic_field_2() myfield.field[555] = 10 myfield.field[664] = 0 idx, label = myfield.custom_watershed(0,11) assert np.size(idx)==8 def test_bifurcations_1(): myfield = basic_field() idx, parent,label = myfield.threshold_bifurcations() assert idx == 999 assert parent == 0 def test_bifurcations_2(): myfield = basic_field_2() idx, parent, label = myfield.threshold_bifurcations() assert np.size(idx) == 15 def test_geodesic_kmeans(nbseeds=3): # Test the geodisc k-means algorithm myfield = basic_field_random(5, 5, 1) seeds = np.argsort(nr.rand(myfield.V))[:nbseeds] seeds, label, inertia = myfield.geodesic_kmeans(seeds) assert_array_equal(label[seeds], np.arange(nbseeds)) assert np.array([i in np.unique(label) for i in np.arange(nbseeds)]).all() def test_constrained_voronoi(nbseeds=3): # Test the geodisc k-means algorithm myfield = basic_field_random() seeds = np.argsort(nr.rand(myfield.V))[:nbseeds] label = 
myfield.constrained_voronoi(seeds) assert_array_equal(label[seeds], np.arange(nbseeds)) assert np.array([i in np.unique(label) for i in np.arange(nbseeds)]).all() def test_constrained_voronoi_2(nbseeds=3): # Test the geodisc k-means algorithm xyz, x = np.zeros((30, 3)), np.arange(30) xyz[:, 0] = x y = np.array((x // 10), np.float64) myfield = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 6), y) seeds = np.array([1, 18, 25]) label = myfield.constrained_voronoi(seeds) assert_array_equal(label, x // 10) def test_subfield(): myfield = basic_field_random() valid = nr.rand(myfield.V) > 0.1 sf = myfield.subfield(valid) assert sf.V == np.sum(valid) def test_subfield2(): myfield = basic_field_random() valid = np.zeros(myfield.V) sf = myfield.subfield(valid) assert sf is None def test_ward1(): myfield = basic_field_random() lab, J = myfield.ward(10) assert lab.max() == 9 def test_ward2(): myfield = basic_field_random() Lab, J1 = myfield.ward(5) Lab, J2 = myfield.ward(10) assert J1 > J2 def test_field_from_coo_matrix(): import scipy.sparse as sps V = 10 a = np.random.rand(V, V) > .9 fi = field_from_coo_matrix_and_data(sps.coo_matrix(a), a) assert fi.E == a.sum() nipy-0.6.1/nipy/algorithms/graph/tests/test_forest.py000066400000000000000000000065151470056100100230040ustar00rootroot00000000000000 import numpy as np from ..forest import Forest def simple_forest(): """ generate a simple forest """ parents = np.array([2, 2, 4, 4, 4]) F = Forest(5, parents) return F def test_forest(): """ test creation of forest object """ F = simple_forest() assert F.E == 8 assert F.cc().max() == 0 def test_forest_trivial(): """ test creation of forest object """ F = Forest(5) assert F.E == 0 assert (F.cc() == np.arange(5)).all() def test_children(): """ test that we obtain children """ sf = simple_forest() ch = sf.get_children() assert len(ch) == 5 assert ch[0] == [] assert ch[1] == [] assert ch[2] == [0, 1] assert ch[3] == [] assert ch[4] == [2, 3] def test_descendants(): """ test the get_descendants() method """ sf = simple_forest() assert sf.get_descendants(0) == [0] assert sf.get_descendants(1) == [1] assert sf.get_descendants(2) == [0, 1, 2] assert sf.get_descendants(4) == [0, 1, 2, 3, 4] def test_root(): """ test the isroot() method """ root = simple_forest().isroot() assert root[4] == True assert root.sum() == 1 def test_merge_simple_branches(): """ test the merge_simple_branches() method """ f = Forest(5, np.array([2, 2, 4, 4, 4])).merge_simple_branches() assert f.V == 5 f = Forest(5, np.array([1, 2, 4, 4, 4])).merge_simple_branches() assert f.V == 3 def test_all_distances(): """ test the all_distances() methods """ f = simple_forest() dg = f.all_distances() print(dg) assert dg[0, 3] == 3. assert dg.max() == 3. assert dg.min() == 0. assert dg.shape == (5, 5) dg = f.all_distances(1) assert dg[3] == 3. 
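# Illustrative note (not part of the original suite): with parents
# [2, 2, 4, 4, 4], simple_forest() encodes the tree
#
#         4
#        / \
#       2   3
#      / \
#     0   1
#
# which explains the distances asserted above, e.g. d(0, 3) = 3.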
def test_depth(): """ test the depth_from_leaves() methods """ f = simple_forest() depth = f.depth_from_leaves() assert depth[0] == 0 assert depth[1] == 0 assert depth[3] == 0 assert depth[2] == 1 assert depth[4] == 2 def test_reorder(): """ test the reorder_from_leaves_to_roots() method """ f = simple_forest() order = f.reorder_from_leaves_to_roots() assert (f.depth_from_leaves() == np.array([0, 0, 0, 1, 2])).all() assert (order == np.array([0, 1, 3, 2, 4])).all() def test_leaves(): """ test the leaves_of_a_subtree() method """ f = simple_forest() assert f.leaves_of_a_subtree([0, 1]) == True assert f.leaves_of_a_subtree([0, 3]) == False assert f.leaves_of_a_subtree([1, 3]) == False assert f.leaves_of_a_subtree([0, 1, 3]) == True assert f.leaves_of_a_subtree([1]) == True def test_depth(): """ Test the tree_depth() method """ f = simple_forest() assert f.tree_depth() == 3 def test_upward_and(): """ test the propagate_upward_and() method """ f = simple_forest() assert(f.propagate_upward_and([0, 1, 0, 1, 0]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([0, 1, 1, 1, 0]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([0, 1, 1, 1, 1]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([1, 1, 0, 1, 0]) == [1, 1, 1, 1, 1]).all() def test_upward(): """ test the propagate_upward() method """ f = simple_forest() assert(f.propagate_upward([0, 0, 1, 3, 1]) == [0, 0, 0, 3, 1]).all() assert(f.propagate_upward([0, 0, 5, 0, 2]) == [0, 0, 0, 0, 0]).all() nipy-0.6.1/nipy/algorithms/graph/tests/test_graph.py000066400000000000000000000307401470056100100226000ustar00rootroot00000000000000 import numpy as np import numpy.random as nr from numpy.testing import ( assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from ..graph import ( WeightedGraph, complete_graph, concatenate_graphs, eps_nn, knn, mst, wgraph_from_3d_grid, wgraph_from_adjacency, wgraph_from_coo_matrix, ) def basicdata(): x = np.array( [[- 1.998, - 2.024], [- 0.117, - 1.010], [1.099, - 0.057], [ 1.729, - 0.252], [1.003, - 0.021], [1.703, - 0.739], [- 0.557, 1.382],[- 1.200, - 0.446],[- 0.331, - 0.256], [- 0.800, - 1.584]]) return x def basic_graph(): l = np.linspace(0, 2 * np.pi, 20, endpoint=False) x = np.column_stack((np.cos(l), np.sin(l))) G = knn(x, 2) return G def basic_graph_2(): l = np.linspace(0, 2 * np.pi, 20, endpoint=False) x = np.column_stack((np.cos(l), np.sin(l))) G = knn(x, 2) return G, x def test_complete(): v = 10 G = complete_graph(v) a = G.get_edges()[:, 0] b = G.get_edges()[:, 1] inds = np.indices((v, v)).reshape( (2, v * v) ) assert_array_equal(inds, (a, b)) def test_knn_1(): x = basicdata() G = knn(x, 1) A = G.get_edges()[:, 0] assert np.shape(A)[0] == 14 def test_set_euclidian(): G, x = basic_graph_2() d = G.weights G.set_euclidian(x / 10) D = G.weights assert np.allclose(D, d / 10, 1e-7) def test_set_gaussian(): G, x = basic_graph_2() d = G.weights G.set_gaussian(x, 1.0) D = G.weights assert np.allclose(D, np.exp(- d * d / 2), 1e-7) def test_set_gaussian_2(): G, x = basic_graph_2() d = G.weights G.set_gaussian(x) D = G.weights sigma = np.sum(d * d) / len(d) assert np.allclose(D, np.exp(-d * d / (2 * sigma)), 1e-7) def test_eps_1(): x = basicdata() G = eps_nn(x, 1.) 
D = G.weights assert np.size(D) == 16 assert (D < 1).all() def test_mst_1(): x = basicdata() G = mst(x) D = G.weights assert np.size(D) == 18 def test_3d_grid(): """test the 6nn graph """ x0 = np.array([0, 0, 0]) x1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0], [0, 0, -1]]) x2 = np.array([[1, 1, 0], [0, 1, 1], [1, 0, 1], [1, -1, 0], [0, 1, -1], [1, 0, -1], [-1, 1, 0], [0, -1, 1], [-1, 0, 1], [-1, -1, 0], [-1, 0, -1], [0, -1, -1]]) x3 = np.array([[1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1], [-1, 1, 1], [-1, 1, -1], [-1, -1, 1], [-1, -1, -1]]) for x in x1: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 2 assert wgraph_from_3d_grid(xyz, 18).E == 2 assert wgraph_from_3d_grid(xyz, 26).E == 2 for x in x2: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 0 assert wgraph_from_3d_grid(xyz, 18).E == 2 assert wgraph_from_3d_grid(xyz, 26).E == 2 for x in x3: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 0 assert wgraph_from_3d_grid(xyz, 18).E == 0 assert wgraph_from_3d_grid(xyz, 26).E == 2 def test_grid_3d_1(): """ Test the 6 nn graphs on 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz, (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 6) assert G.E == 186 def test_grid_3d_2(): """ Test the 18-nn graph on a 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz,(3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 18) assert G.E == 346 def test_grid_3d_3(): """ Test the 26-nn graph on a 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz,(3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) assert G.E == 346 def test_grid_3d_4(): nx, ny, nz = 10, 10, 10 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) D = G.weights # 6 * 9 * 10 * 10 assert sum(D == 1) == 5400 # 26 * 8 ** 3 + 6 * 8 ** 2 * 17 + 12 * 8 * 11 + 8 * 7 assert np.size(D) == 20952 # 18 * 8 ** 3 + 6 * 8 ** 2 * 13 + 12 * 8 * 9 + 8 * 6 assert sum(D < 1.5) == 15120 def test_grid_3d_5(): nx, ny, nz = 5, 5, 5 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) D = G.weights.copy() G.set_euclidian(xyz) assert_array_almost_equal(G.weights, D) def test_grid_3d_6(): nx, ny, nz = 5, 5, 5 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix().tolil() assert len(adj.rows[63]) == 26 for i in [62, 64, 58, 68, 38, 88, 57, 67, 37, 87, 59, 69, 39, 89, 33, 83, 43, 93, 32, 82, 42, 92, 34, 84, 44, 94]: assert i in adj.rows[63] def test_grid_3d_7(): """ Check that the grid graph is symmetric """ xyz = np.array(np.where(np.random.rand(5, 5, 5) > 0.5)).T adj = wgraph_from_3d_grid(xyz, 6).to_coo_matrix() assert (adj - adj.T).nnz == 0 adj = wgraph_from_3d_grid(xyz, 18).to_coo_matrix() assert (adj - adj.T).nnz == 0 adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix() assert (adj - adj.T).nnz == 0 def test_cut_redundancies(): G = basic_graph() e = G.E edges = G.get_edges() weights = G.weights G.E = 2 * G.E G.edges = np.concatenate((edges, edges)) G.weights = np.concatenate((weights, weights)) K = G.cut_redundancies() assert K.E == e def test_degrees(): G = basic_graph() (r, l) = G.degrees() assert (r == 2).all() assert (l == 2).all() def test_normalize(): G = basic_graph() G.normalize() M = G.to_coo_matrix() sM = np.array(M.sum(1)).ravel() assert (np.abs(sM - 1) < 1.e-7).all() def test_normalize_2(): G = basic_graph() G.normalize(0) M = 
G.to_coo_matrix() sM = np.array(M.sum(1)).ravel() assert (np.abs(sM - 1) < 1.e-7).all() def test_normalize_3(): G = basic_graph() G.normalize(1) M = G.to_coo_matrix() sM = np.array(M.sum(0)).ravel() assert (np.abs(sM - 1) < 1.e-7).all() def test_adjacency(): G = basic_graph() M = G.to_coo_matrix() assert ( M.diagonal() == 0 ).all() A = M.toarray() assert ( np.diag(A, 1) != 0 ).all() assert ( np.diag(A, -1) != 0 ).all() def test_cc(): G = basic_graph() l = G.cc() L = np.array(l==0) assert L.all() def test_isconnected(): G = basic_graph() assert G.is_connected() def test_main_cc(): x = basicdata() G = knn(x, 1) l = G.cc() l = G.main_cc() assert np.size(l) == 6 def test_dijkstra(): """ Test dijkstra's algorithm """ G = basic_graph() l = G.dijkstra(0) assert np.abs(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7 def test_dijkstra_multiseed(): """ Test dijkstra's algorithm, multi_seed version """ G = basic_graph() l = G.dijkstra([0, 1]) assert np.abs(l[10] - 18 * np.sin(np.pi / 20)) < 1.e-7 def test_dijkstra2(): """ Test dijkstra's algorithm, API detail """ G = basic_graph() l = G.dijkstra() assert np.abs(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7 def test_compact_representation(): """ Test that the compact representation of the graph is indeed correct """ G = basic_graph() idx, ne, we = G.compact_neighb() assert len(idx) == 21 assert idx[0] == 0 assert idx[20] == G.E assert len(ne) == G.E assert len(we) == G.E def test_floyd_1(): """ Test Floyd's algo without seed """ G = basic_graph() l = G.floyd() for i in range(10): plop = np.abs(np.diag(l, i) - 2 * i * np.sin(2 * np.pi / 40)) assert plop.max() < 1.e-4 def test_floyd_2(): """ Test Floyd's algo, with seed """ G = basic_graph() seeds = np.array([0,10]) l = G.floyd(seeds) for i in range(10): plop = np.abs(l[0, i] - 2 * i * np.sin(2 * np.pi / 40)) assert plop.max() < 1.e-4 plop = np.abs(l[0,19 - i] - 2 * (i + 1) * np.sin(2 * np.pi / 40)) assert plop.max() < 1.e-4 for i in range(10): plop = np.abs(l[1, i] - 2 * (10 - i) * np.sin(2 * np.pi / 40)) assert plop.max() < 1.e-4 plop = np.abs(l[1, 19 - i] - 2 * (9 - i) * np.sin(2 * np.pi / 40)) assert plop.max() < 1.e-4 def test_symmeterize(): a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) edges = np.vstack((a, b)).T d = np.ones(14) G = WeightedGraph(7, edges, d) G.symmeterize() d = G.weights assert (d == 0.5).all() def test_voronoi(): """ test voronoi labelling with 2 seeds """ a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]); edges = np.transpose(np.vstack((a, b))) G = WeightedGraph(7, edges,d) G.symmeterize() seed = np.array([0, 6]) label = G.voronoi_labelling(seed) assert label[1] == 0 def test_voronoi2(): """ test voronoi labelling with one seed """ a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]); edges = np.vstack((a, b)).T G = WeightedGraph(7, edges,d) G.symmeterize() seed = np.array([0]) label = G.voronoi_labelling(seed) assert label[4] == 0 def test_voronoi3(): """ test voronoi labelling with non-connected components """ a = np.array([0, 1, 2, 5, 6]) b = np.array([1, 2, 3, 6, 0]) d = np.array([1, 1, 1, 1, 1]); edges = np.vstack((a, b)).T G = WeightedGraph(7, edges,d) G.symmeterize() seed = np.array([0]) label = G.voronoi_labelling(seed) assert label[4] == - 1 def test_concatenate1(n=10): x1 = 
nr.randn(n, 2) x2 = nr.randn(n, 2) G1 = knn(x1, 5) G2 = knn(x2, 5) G = concatenate_graphs(G1, G2) assert G.cc().max() > 0 def test_concatenate2(n=10): G1 = complete_graph(n) G2 = complete_graph(n) G = concatenate_graphs(G1, G2) assert G.cc().max() == 1 def test_anti_symmeterize(): n = 10 eps = 1.e-7 M = (nr.rand(n, n) > 0.7).astype(np.float64) C = M - M.T G = wgraph_from_adjacency(M) G.anti_symmeterize() A = G.to_coo_matrix() assert np.sum(C - A) ** 2 < eps def test_subgraph_1(n=10): x = nr.randn(n, 2) G = WeightedGraph(x.shape[0]) valid = np.zeros(n) assert(G.subgraph(valid) is None) def test_subgraph_2(n=10): x = nr.randn(n, 2) G = knn(x, 5) valid = np.zeros(n) valid[:n // 2] = 1 assert G.subgraph(valid).edges.max() < n / 2 def test_graph_create_from_array(): """Test the creation of a graph from a sparse coo_matrix """ a = np.random.randn(5, 5) wg = wgraph_from_adjacency(a) b = wg.to_coo_matrix() assert_array_equal(a, b.todense()) def test_graph_create_from_coo_matrix(): """Test the creation of a graph from a sparse coo_matrix """ import scipy.sparse as spp a = (np.random.randn(5, 5) > .8).astype(np.float64) s = spp.coo_matrix(a) wg = wgraph_from_coo_matrix(s) b = wg.to_coo_matrix() assert_array_equal(b.todense(), a) def test_to_coo_matrix(): """ Test the generation of a sparse matrix as output """ a = (np.random.randn(5, 5)>.8).astype(np.float64) wg = wgraph_from_adjacency(a) b = wg.to_coo_matrix().todense() assert_array_equal(a, b) def test_list_neighbours(): """ test the generation of neighbours list """ bg = basic_graph() nl = bg.list_of_neighbors() assert len(nl) == bg.V for ni in nl: assert len(ni) == 2 def test_kruskal(): """ test Kruskal's algor to thin the graph """ x = basicdata() dmax = np.sqrt((x ** 2).sum()) m = mst(x) g = eps_nn(x, dmax) k = g.kruskal() assert_almost_equal(k.weights.sum(), m.weights.sum()) def test_concatenate3(): """ test the graph concatenation utlitity """ bg = basic_graph() cg = concatenate_graphs(bg, bg) valid = np.zeros(cg.V) valid[:bg.V] = 1 sg = cg.subgraph(valid) assert_array_equal(sg.edges, bg.edges) assert_array_equal(sg.weights, bg.weights) def test_cliques(): """ test the computation of cliques """ x = np.random.rand(20, 2) x[15:] += 2. g = knn(x, 5) g.set_gaussian(x, 1.) cliques = g.cliques() assert len(np.unique(cliques)) > 1 nipy-0.6.1/nipy/algorithms/group/000077500000000000000000000000001470056100100167535ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/group/__init__.py000066400000000000000000000002601470056100100210620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .parcel_analysis import ParcelAnalysis, parcel_analysis nipy-0.6.1/nipy/algorithms/group/parcel_analysis.py000066400000000000000000000514261470056100100225060ustar00rootroot00000000000000""" Parcel-based group analysis of multi-subject image data. Routines implementing Bayesian inference on group-level effects assumed to be constant within given brain parcels. The model accounts for both estimation errors and localization uncertainty in reference space of first-level images. See: Keller, Merlin et al (2008). Dealing with Spatial Normalization Errors in fMRI Group Inference using Hierarchical Modeling. *Statistica Sinica*; 18(4). Keller, Merlin et al (2009). Anatomically Informed Bayesian Model Selection for fMRI Group Data Analysis. *In MICCAI'09, Lecture Notes in Computer Science*; 5762:450--457. Roche, Alexis (2012). 
OHBM'12 talk, slides at: https://sites.google.com/site/alexisroche/slides/Talk_Beijing12.pdf """ import warnings from os.path import join import numpy as np import scipy.ndimage as nd import scipy.stats as ss from nibabel import io_orientation from ... import save_image from ...core.image.image_spaces import make_xyz_image, xyz_affine from ..kernel_smooth import fwhm2sigma from ..registration import resample from ..statistics.bayesian_mixed_effects import two_level_glm from ..statistics.histogram import histogram SIGMA_MIN = 1e-5 NDIM = 3 # This will work for 3D images def _gaussian_filter(x, msk, sigma): """ Smooth a multidimensional array `x` using a Gaussian filter with axis-wise standard deviations given by `sigma`, after padding `x` with zeros within a mask `msk`. """ x[msk] = 0. gx = nd.gaussian_filter(x, sigma) norma = 1 - nd.gaussian_filter(msk.astype(float), sigma) gx[~msk] /= norma[~msk] gx[msk] = 0. return gx def _gaussian_energy_1d(sigma): """ Compute the integral of a one-dimensional squared three-dimensional Gaussian kernel with axis-wise standard deviation `sigma`. """ mask_half_size = np.ceil(5 * sigma).astype(int) mask_size = 2 * mask_half_size + 1 x = np.zeros(mask_size) x[mask_half_size] = 1 y = nd.gaussian_filter1d(x, sigma) K = np.sum(y ** 2) / np.sum(y) return K def _gaussian_energy(sigma): """ Compute the integral of a squared three-dimensional Gaussian kernel with axis-wise standard deviations `sigma`. """ sigma = np.asarray(sigma) if sigma.size == 1: sigma = np.repeat(sigma, NDIM) # Use kernel separability to save memory return np.prod([_gaussian_energy_1d(s) for s in sigma]) def _smooth(con, vcon, msk, sigma): """ Integrate spatial uncertainty in standard space assuming that localization errors follow a zero-mean Gaussian distribution with axis-wise standard deviations `sigma` in voxel units. The expected Euclidean norm of registration errors is sqrt(NDIM) * sigma. """ scon = _gaussian_filter(con, msk, sigma) svcon = _gaussian_filter(con ** 2, msk, sigma) - scon ** 2 if vcon is not None: svcon += _gaussian_filter(vcon, msk, sigma) return scon, svcon def _smooth_spm(con, vcon, msk, sigma): """ Given a contrast image `con` and the corresponding variance image `vcon`, both assumed to be estimated from non-smoothed first-level data, compute what `con` and `vcon` would have been had the data been smoothed with a Gaussian kernel. """ scon = _gaussian_filter(con, msk, sigma) K = _gaussian_energy(sigma) if vcon is not None: svcon = K * _gaussian_filter(vcon, msk, sigma / np.sqrt(2)) else: svcon = np.zeros(con.shape) return scon, svcon def _smooth_image_pair(con_img, vcon_img, sigma, method='default'): """ Smooth an input image and associated variance image using either the spatial uncertainty accounting method consistent with Keller et al's model, or the SPM approach. 
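With `method='spm'`, the smoothed variance is rescaled by the energy of the squared kernel computed in `_gaussian_energy`, whereas the default method propagates localization uncertainty following Keller et al's model (see `_smooth`).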
""" if method == 'default': smooth_fn = _smooth elif method == 'spm': smooth_fn = _smooth_spm else: raise ValueError('Unknown smoothing method') con = con_img.get_fdata() if vcon_img is not None: vcon = con_img.get_fdata() else: vcon = None msk = np.isnan(con) scon, svcon = smooth_fn(con, vcon, msk, sigma) scon_img = make_xyz_image(scon, xyz_affine(con_img), con_img.reference) svcon_img = make_xyz_image(svcon, xyz_affine(con_img), con_img.reference) return scon_img, svcon_img def _save_image(img, path): try: save_image(img, path) except: warnings.warn(f'Could not write image: {path}', UserWarning) class ParcelAnalysis: def __init__(self, con_imgs, parcel_img, parcel_info=None, msk_img=None, vcon_imgs=None, design_matrix=None, cvect=None, fwhm=8, smooth_method='default', res_path=None, write_smoothed_images=False): """ Bayesian parcel-based analysis. Given a sequence of independent images registered to a common space (for instance, a set of contrast images from a first-level fMRI analysis), perform a second-level analysis assuming constant effects throughout parcels defined from a given label image in reference space. Specifically, a model of the following form is assumed: Y = X * beta + variability, where Y denotes the input image sequence, X is a design matrix, and beta are parcel-wise parameter vectors. The algorithm computes the Bayesian posterior probability of beta in each parcel using an expectation propagation scheme. Parameters ---------- con_imgs: sequence of nipy-like images Images input to the group analysis. parcel_img: nipy-like image Label image where each label codes for a parcel. parcel_info: sequence of arrays, optional A sequence of two arrays with same length equal to the number of distinct parcels consistently with the `parcel_img` argument. The first array gives parcel names and the second, parcel values, i.e., corresponding intensities in the associated parcel image. By default, parcel values are taken as `np.unique(parcel_img.get_fdata())` and parcel names are these values converted to strings. msk_img: nipy-like image, optional Binary mask to restrict analysis. By default, analysis is carried out on all parcels with nonzero value. vcon_imgs: sequence of nipy-like images, optional First-level variance estimates corresponding to `con_imgs`. This is useful if the input images are "noisy". By default, first-level variances are assumed to be zero. design_matrix: array, optional If None, a one-sample analysis model is used. Otherwise, an array with shape (n, p) where `n` matches the number of input scans, and `p` is the number of regressors. cvect: array, optional Contrast vector of interest. The method makes an inference on the contrast defined as the dot product cvect'*beta, where beta are the unknown parcel-wise effects. If None, `cvect` is assumed to be np.array((1,)). However, the `cvect` argument is mandatory if `design_matrix` is provided. fwhm: float, optional A parameter that represents the localization uncertainty in reference space in terms of the full width at half maximum of an isotropic Gaussian kernel. smooth_method: str, optional One of 'default' and 'spm'. Setting `smooth_method=spm` results in simply smoothing the input images using a Gaussian kernel, while the default method involves more complex smoothing in order to propagate spatial uncertainty into the inference process. res_path: str, optional An existing path to write output images. If None, no output is written. 
write_smoothed_images: bool, optional Specify whether smoothed images computed throughout the inference process are to be written on disk in `res_path`. """ self.smooth_method = smooth_method self.con_imgs = con_imgs self.vcon_imgs = vcon_imgs self.n_subjects = len(con_imgs) if self.vcon_imgs is not None: if not self.n_subjects == len(vcon_imgs): raise ValueError('List of contrasts and variances' ' do not have the same length') if msk_img is None: self.msk = None else: self.msk = msk_img.get_fdata().astype(bool).squeeze() self.res_path = res_path # design matrix if design_matrix is None: self.design_matrix = np.ones(self.n_subjects) self.cvect = np.ones((1,)) if cvect is not None: raise ValueError('No contrast vector expected') else: self.design_matrix = np.asarray(design_matrix) if cvect is None: raise ValueError('`cvect` cannot be None with' ' provided design matrix') self.cvect = np.asarray(cvect) if not self.design_matrix.shape[0] == self.n_subjects: raise ValueError('Design matrix shape is inconsistent' ' with number of input images') if not len(self.cvect) == self.design_matrix.shape[1]: raise ValueError('Design matrix shape is inconsistent' ' with provided `cvect`') # load the parcellation and resample it at the appropriate # resolution self.reference = parcel_img.reference self.parcel_full_res = parcel_img.get_fdata().astype('uintp').squeeze() self.affine_full_res = xyz_affine(parcel_img) parcel_img = make_xyz_image(self.parcel_full_res, self.affine_full_res, self.reference) self.affine = xyz_affine(self.con_imgs[0]) parcel_img_rsp = resample(parcel_img, reference=(self.con_imgs[0].shape, self.affine), interp_order=0) self.parcel = parcel_img_rsp.get_fdata().astype('uintp').squeeze() if self.msk is None: self.msk = self.parcel > 0 # get parcel labels and values if parcel_info is None: self._parcel_values = np.unique(self.parcel) self._parcel_labels = self._parcel_values.astype(str) else: self._parcel_labels = np.asarray(parcel_info[0]).astype(str) self._parcel_values = np.asarray(parcel_info[1]) # determine smoothing kernel size, which involves converting # the input full-width-at-half-maximum parameter given in mm # to standard deviation in voxel units. orient = io_orientation(self.affine)[:, 0].astype(int) # `orient` is an array, so this slicing leads to advanced indexing. voxsize = np.abs(self.affine[orient, list(range(3))]) self.sigma = np.maximum(fwhm2sigma(fwhm) / voxsize, SIGMA_MIN) # run approximate belief propagation self._smooth_images(write_smoothed_images) self._voxel_level_inference() self._parcel_level_inference() def _smooth_images(self, write): """ Smooth input contrast images to account for localization uncertainty in reference space. """ cons, vcons = [], [] for i in range(self.n_subjects): con = self.con_imgs[i] if self.vcon_imgs is not None: vcon = self.vcon_imgs[i] else: vcon = None scon, svcon = _smooth_image_pair(con, vcon, self.sigma, method=self.smooth_method) if write and self.res_path is not None: _save_image(scon, join(self.res_path, 'scon' + str(i) + '.nii.gz')) _save_image(svcon, join(self.res_path, 'svcon' + str(i) + '.nii.gz')) cons += [scon.get_fdata()[self.msk]] vcons += [svcon.get_fdata()[self.msk]] self.cons = np.array(cons) self.vcons = np.array(vcons) def _voxel_level_inference(self, mfx=True): """ Estimate voxel-level group parameters using mixed effects variational Bayes algorithm. 
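The contrast variance is computed as ``vbeta = s2 * cvect' (X' X)^{-1} cvect``, which reduces to ``s2 * cvect[0] ** 2 / sum(X ** 2)`` for a one-dimensional design.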
""" beta, s2, dof = two_level_glm(self.cons, self.vcons, self.design_matrix) self.beta = np.dot(self.cvect, beta) if self.design_matrix.ndim == 1: self.vbeta = s2 * (self.cvect[0] ** 2\ / np.sum(self.design_matrix ** 2)) else: tmp = np.linalg.inv(np.dot(self.design_matrix.T, self.design_matrix)) self.vbeta = s2 * np.dot(self.cvect.T, np.dot(tmp, self.cvect)) self.dof = dof def _parcel_level_inference(self): """ Estimate parcel-level group parameters using mixed effects variational Bayes algorithm. """ parcel_masked = self.parcel[self.msk] values = np.where(histogram(parcel_masked) > 0)[0][1:] prob = np.zeros(len(values)) mu = np.zeros(len(values)) s2 = np.zeros(len(values)) dof = np.zeros(len(values)) labels = [] # For each parcel, estimate parcel-level parameters using a # mxf model for i in range(len(values)): mask = parcel_masked == values[i] y = self.beta[mask] vy = self.vbeta[mask] npts = y.size try: mu[i], s2[i], dof[i] = two_level_glm(y, vy, np.ones(npts)) prob[i] = ss.t.cdf(float(mu[i] / np.sqrt(s2[i] / npts)), dof[i]) except: prob[i] = 0 idx = int(np.where(self._parcel_values == values[i])[0]) labels += [self._parcel_labels[idx]] # Sort labels by ascending order of mean values I = np.argsort(-mu) self.parcel_values = values[I] self.parcel_labels = np.array(labels)[I] self.parcel_prob = prob[I] self.parcel_mu = mu[I] self.parcel_s2 = s2[I] self.parcel_dof = dof[I] def dump_results(self, path=None): """ Save parcel analysis information in NPZ file. """ if path is None and self.res_path is not None: path = self.res_path else: path = '.' np.savez(join(path, 'parcel_analysis.npz'), values=self.parcel_values, labels=self.parcel_labels, prob=self.parcel_prob, mu=self.parcel_mu, s2=self.parcel_s2, dof=self.parcel_dof) def t_map(self): """ Compute voxel-wise t-statistic map. This map is different from what you would get from an SPM-style mass univariate analysis because the method accounts for both spatial uncertainty in reference space and possibly errors on first-level inputs (if variance images are provided). Returns ------- tmap_img: nipy image t-statistic map. """ tmap = np.zeros(self.msk.shape) beta = self.beta var = self.vbeta tmap[self.msk] = beta / np.sqrt(var) tmap_img = make_xyz_image(tmap, self.affine, self.reference) if self.res_path is not None: _save_image(tmap_img, join(self.res_path, 'tmap.nii.gz')) tmp = np.zeros(self.msk.shape) tmp[self.msk] = beta _save_image(make_xyz_image(tmp, self.affine, self.reference), join(self.res_path, 'beta.nii.gz')) tmp[self.msk] = var _save_image(make_xyz_image(tmp, self.affine, self.reference), join(self.res_path, 'vbeta.nii.gz')) return tmap_img def parcel_maps(self, full_res=True): """ Compute parcel-based posterior contrast means and positive contrast probabilities. Parameters ---------- full_res: boolean If True, the output images will be at the same resolution as the parcel image. Otherwise, resolution will match the first-level images. Returns ------- pmap_mu_img: nipy image Image of posterior contrast means for each parcel. pmap_prob_img: nipy image Corresponding image of posterior probabilities of positive contrast. 
""" if full_res: parcel = self.parcel_full_res affine = self.affine_full_res else: parcel = self.parcel affine = self.affine pmap_prob = np.zeros(parcel.shape) pmap_mu = np.zeros(parcel.shape) for label, prob, mu in zip(self.parcel_values, self.parcel_prob, self.parcel_mu): pmap_prob[parcel == label] = prob pmap_mu[parcel == label] = mu pmap_prob_img = make_xyz_image(pmap_prob, affine, self.reference) pmap_mu_img = make_xyz_image(pmap_mu, affine, self.reference) if self.res_path is not None: _save_image(pmap_prob_img, join(self.res_path, 'parcel_prob.nii.gz')) _save_image(pmap_mu_img, join(self.res_path, 'parcel_mu.nii.gz')) return pmap_mu_img, pmap_prob_img def parcel_analysis(con_imgs, parcel_img, msk_img=None, vcon_imgs=None, design_matrix=None, cvect=None, fwhm=8, smooth_method='default', res_path=None): """ Helper function for Bayesian parcel-based analysis. Given a sequence of independent images registered to a common space (for instance, a set of contrast images from a first-level fMRI analysis), perform a second-level analysis assuming constant effects throughout parcels defined from a given label image in reference space. Specifically, a model of the following form is assumed: Y = X * beta + variability, where Y denotes the input image sequence, X is a design matrix, and beta are parcel-wise parameter vectors. The algorithm computes the Bayesian posterior probability of cvect'*beta, where cvect is a given contrast vector, in each parcel using an expectation propagation scheme. Parameters ---------- con_imgs: sequence of nipy-like images Images input to the group analysis. parcel_img: nipy-like image Label image where each label codes for a parcel. msk_img: nipy-like image, optional Binary mask to restrict analysis. By default, analysis is carried out on all parcels with nonzero value. vcon_imgs: sequence of nipy-like images, optional First-level variance estimates corresponding to `con_imgs`. This is useful if the input images are "noisy". By default, first-level variances are assumed to be zero. design_matrix: array, optional If None, a one-sample analysis model is used. Otherwise, an array with shape (n, p) where `n` matches the number of input scans, and `p` is the number of regressors. cvect: array, optional Contrast vector of interest. The method makes an inference on the contrast defined as the dot product cvect'*beta, where beta are the unknown parcel-wise effects. If None, `cvect` is assumed to be np.array((1,)). However, the `cvect` argument is mandatory if `design_matrix` is provided. fwhm: float, optional A parameter that represents the localization uncertainty in reference space in terms of the full width at half maximum of an isotropic Gaussian kernel. smooth_method: str, optional One of 'default' and 'spm'. Setting `smooth_method=spm` results in simply smoothing the input images using a Gaussian kernel, while the default method involves more complex smoothing in order to propagate spatial uncertainty into the inference process. res_path: str, optional An existing path to write output images. If None, no output is written. Returns ------- pmap_mu_img: nipy image Image of posterior contrast means for each parcel. pmap_prob_img: nipy image Corresponding image of posterior probabilities of positive contrast. 
""" p = ParcelAnalysis(con_imgs, parcel_img, parcel_info=None, msk_img=msk_img, vcon_imgs=vcon_imgs, design_matrix=design_matrix, cvect=cvect, fwhm=fwhm, smooth_method=smooth_method, res_path=res_path) return p.parcel_maps() nipy-0.6.1/nipy/algorithms/group/tests/000077500000000000000000000000001470056100100201155ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/group/tests/__init__.py000066400000000000000000000000501470056100100222210ustar00rootroot00000000000000# Init to make test directory a package nipy-0.6.1/nipy/algorithms/group/tests/test_parcel_analysis.py000066400000000000000000000127451470056100100247100ustar00rootroot00000000000000 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import numpy as np import pytest from numpy.testing import assert_array_equal from ....core.image.image_spaces import make_xyz_image, xyz_affine from ..parcel_analysis import ParcelAnalysis, _smooth_image_pair, parcel_analysis NSUBJ = 10 NLABELS = 10 SIZE = (50, 50, 50) AFFINE = np.diag(np.concatenate((np.random.rand(3), np.ones((1,))))) def test_smooth_image_pair(): con_img = make_xyz_image(np.random.normal(0, 1, size=SIZE), AFFINE, 'talairach') vcon_img = make_xyz_image(np.random.normal(0, 1, size=SIZE), AFFINE, 'talairach') for sigma in (1, (1, 1.2, 0.8)): for method in ('default', 'spm'): scon_img, svcon_img = _smooth_image_pair(con_img, vcon_img, sigma, method=method) pytest.raises(ValueError, _smooth_image_pair, con_img, vcon_img, 1, method='fsl') def make_fake_data(): con_imgs = [make_xyz_image(np.random.normal(0, 1, size=SIZE), AFFINE, 'talairach') for i in range(NSUBJ)] parcel_img = make_xyz_image(np.random.randint(NLABELS, size=SIZE), AFFINE, 'talairach') return con_imgs, parcel_img def _test_parcel_analysis(smooth_method, parcel_info, vcon=False, full_res=True): con_imgs, parcel_img = make_fake_data() if vcon: vcon_imgs = con_imgs else: vcon_imgs = None g = ParcelAnalysis(con_imgs, parcel_img, vcon_imgs=vcon_imgs, smooth_method=smooth_method, parcel_info=parcel_info) t_map_img = g.t_map() assert_array_equal(t_map_img.shape, SIZE) assert_array_equal(xyz_affine(t_map_img), AFFINE) parcel_mu_img, parcel_prob_img = g.parcel_maps(full_res=full_res) assert_array_equal(parcel_mu_img.shape, SIZE) assert_array_equal(xyz_affine(parcel_mu_img), AFFINE) assert_array_equal(parcel_prob_img.shape, SIZE) assert_array_equal(xyz_affine(parcel_prob_img), AFFINE) assert parcel_prob_img.get_fdata().max() <= 1 assert parcel_prob_img.get_fdata().min() >= 0 outside = parcel_img.get_fdata() == 0 assert_array_equal(t_map_img.get_fdata()[outside], 0) assert_array_equal(parcel_mu_img.get_fdata()[outside], 0) assert_array_equal(parcel_prob_img.get_fdata()[outside], 0) def test_parcel_analysis(): parcel_info = (list(range(NLABELS)), list(range(NLABELS))) _test_parcel_analysis('default', parcel_info) def test_parcel_analysis_nonstandard(): _test_parcel_analysis('default', None, vcon=True, full_res=False) def test_parcel_analysis_spm(): _test_parcel_analysis('spm', None) def test_parcel_analysis_nosmooth(): con_imgs, parcel_img = make_fake_data() msk_img = make_xyz_image(np.ones(SIZE, dtype='uint'), AFFINE, 'talairach') X = np.random.normal(0, 1, size=(NSUBJ, 5)) c = np.random.normal(0, 1, size=(5,)) g = ParcelAnalysis(con_imgs, parcel_img, msk_img=msk_img, design_matrix=X, cvect=c, fwhm=0) t_map = g.t_map().get_fdata() m_error = np.abs(np.mean(t_map)) v_error = np.abs(np.var(t_map) - (NSUBJ - 5) / float(NSUBJ - 7)) print(f'Errors: {m_error:f} 
(mean), {v_error:f} (var)') assert m_error < .1 assert v_error < .1 def _test_parcel_analysis_error(**kw): con_imgs, parcel_img = make_fake_data() return ParcelAnalysis(con_imgs, parcel_img, **kw) def test_parcel_analysis_error(): pytest.raises(ValueError, _test_parcel_analysis_error, vcon_imgs=list(range(NSUBJ + 1))) pytest.raises(ValueError, _test_parcel_analysis_error, cvect=np.ones(1)) pytest.raises(ValueError, _test_parcel_analysis_error, design_matrix=np.random.rand(NSUBJ, 2)) pytest.raises(ValueError, _test_parcel_analysis_error, design_matrix=np.random.rand(NSUBJ + 1, 2), cvect=np.ones(2)) pytest.raises(ValueError, _test_parcel_analysis_error, design_matrix=np.random.rand(NSUBJ, 2), cvect=np.ones(3)) def test_parcel_analysis_write_mode(): # find a subdirectory name that doesn't exist to check that # attempts to write in a non-existing directory do not raise # errors con_imgs, parcel_img = make_fake_data() subdirs = [o for o in os.listdir('.') if os.path.isdir(o)] res_path = 'a' while res_path in subdirs: res_path += 'a' p = ParcelAnalysis(con_imgs, parcel_img, res_path=res_path, write_smoothed_images=True) pytest.raises(IOError, p.dump_results) _ = p.t_map() _ = p.parcel_maps() def test_parcel_analysis_function(): con_imgs, parcel_img = make_fake_data() parcel_mu_img, parcel_prob_img = parcel_analysis(con_imgs, parcel_img) assert_array_equal(parcel_mu_img.shape, SIZE) assert_array_equal(xyz_affine(parcel_mu_img), AFFINE) assert_array_equal(parcel_prob_img.shape, SIZE) assert_array_equal(xyz_affine(parcel_prob_img), AFFINE) assert parcel_prob_img.get_fdata().max() <= 1 assert parcel_prob_img.get_fdata().min() >= 0 outside = parcel_img.get_fdata() == 0 assert_array_equal(parcel_mu_img.get_fdata()[outside], 0) assert_array_equal(parcel_prob_img.get_fdata()[outside], 0) nipy-0.6.1/nipy/algorithms/interpolation.py000066400000000000000000000071341470056100100210650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Image interpolators using ndimage. """ import tempfile import numpy as np from scipy.ndimage import map_coordinates, spline_filter from ..utils import seq_prod class ImageInterpolator: """ Interpolate Image instance at arbitrary points in world space The resampling is done with ``scipy.ndimage``. """ # Padding for prefilter calculation in 'nearest' and 'grid-constant' mode. # See: https://github.com/scipy/scipy/issues/13600 n_prepad_if_needed = 12 def __init__(self, image, order=3, mode='constant', cval=0.0): """ Parameters ---------- image : Image Image to be interpolated. order : int, optional order of spline interpolation as used in ``scipy.ndimage``. Default is 3. mode : str, optional Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'. cval : scalar, optional Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. """ # order and mode are read-only to allow pre-calculation of spline # filters. 
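        # (Both feed into the spline prefiltering in _buildknots below:
        # the output of `spline_filter` depends on `order` and `mode`,
        # so changing either after construction would leave the cached
        # coefficients in `self._data` stale.)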
self.image = image self._order = order self._mode = mode self.cval = cval self._datafile = None self._n_prepad = 0 # Non-zero for 'nearest' and 'grid-constant' self._buildknots() @property def mode(self): """ Mode is read-only """ return self._mode @property def order(self): """ Order is read-only """ return self._order def _buildknots(self): data = np.nan_to_num(self.image.get_fdata()).astype(np.float64) if self.order > 1: if self.mode in ('nearest', 'grid-constant'): # See: https://github.com/scipy/scipy/issues/13600 self._n_prepad = self.n_prepad_if_needed if self._n_prepad != 0: data = np.pad(data, self._n_prepad, mode='edge') kwargs = {'order': self.order} kwargs['mode'] = self.mode data = spline_filter(data, **kwargs) self._datafile = tempfile.TemporaryFile() data.tofile(self._datafile) self._data = np.memmap(self._datafile, dtype=data.dtype, mode='r+', shape=data.shape) del(data) def evaluate(self, points): """ Resample image at points in world space Parameters ---------- points : array values in self.image.coordmap.output_coords. Each row is a point. Returns ------- V : ndarray interpolator of self.image evaluated at points """ points = np.array(points, np.float64) output_shape = points.shape[1:] points.shape = (points.shape[0], seq_prod(output_shape)) cmapi = self.image.coordmap.inverse() voxels = cmapi(points.T).T + self._n_prepad V = map_coordinates(self._data, voxels, order=self.order, mode=self.mode, cval=self.cval, prefilter=self.order < 2) # ndimage.map_coordinates returns a flat array, # it needs to be reshaped to the original shape V.shape = output_shape return V nipy-0.6.1/nipy/algorithms/kernel_smooth.py000066400000000000000000000204771470056100100210540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Linear filter(s). For the moment, only a Gaussian smoothing filter """ import gc import numpy as np import numpy.linalg as npl from numpy import fft from nipy.core.api import AffineTransform, Image from nipy.core.reference.coordinate_map import product from nipy.utils import seq_prod class LinearFilter: ''' A class to implement some FFT smoothers for Image objects. By default, this does a Gaussian kernel smooth. More choices would be better! ''' normalization = 'l1sum' def __init__(self, coordmap, shape, fwhm=6.0, scale=1.0, location=0.0, cov=None): """ Parameters ---------- coordmap : ``CoordinateMap`` shape : sequence fwhm : float, optional fwhm for Gaussian kernel, default is 6.0 scale : float, optional scaling to apply to data after smooth, default 1.0 location : float offset to apply to data after smooth and scaling, default 0 cov : None or array, optional Covariance matrix """ self.coordmap = coordmap self.bshape = shape self.fwhm = fwhm self.scale = scale self.location = location self.cov = cov self._setup_kernel() def _setup_kernel(self): if not isinstance(self.coordmap, AffineTransform): raise ValueError('for FFT smoothing, we need a ' 'regular (affine) coordmap') # voxel indices of array implied by shape voxels = np.indices(self.bshape).astype(np.float64) # coordinates of physical center. XXX - why the 'floor' here? vox_center = np.floor((np.array(self.bshape) - 1) / 2.0) phys_center = self.coordmap(vox_center) # reshape to (N coordinates, -1). 
We appear to need to assign # to shape instead of doing a reshape, in order to avoid memory # copies voxels.shape = (voxels.shape[0], seq_prod(voxels.shape[1:])) # physical coordinates relative to center X = (self.coordmap(voxels.T) - phys_center).T X.shape = (self.coordmap.ndims[1],) + tuple(self.bshape) # compute kernel from these positions kernel = self(X, axis=0) kernel = _crop(kernel) self.norms = {'l2':np.sqrt((kernel**2).sum()), 'l1':np.fabs(kernel).sum(), 'l1sum':kernel.sum()} self._kernel = kernel self.shape = (np.ceil( (np.asarray(self.bshape) + np.asarray(kernel.shape)) / 2) * 2 + 2).astype(np.intp) self.fkernel = np.zeros(self.shape) slices = [slice(0, kernel.shape[i]) for i in range(len(kernel.shape))] self.fkernel[tuple(slices)] = kernel self.fkernel = fft.rfftn(self.fkernel) return kernel def _normsq(self, X, axis=-1): """ Compute the (periodic, i.e. on a torus) squared distance needed for FFT smoothing. Assumes coordinate system is linear. Parameters ---------- X : array array of points axis : int, optional axis containing coordinates. Default -1 """ # copy X _X = np.array(X) # roll coordinate axis to front _X = np.rollaxis(_X, axis) # convert coordinates to FWHM units if self.fwhm != 1.0: f = fwhm2sigma(self.fwhm) if f.shape == (): f = np.ones(len(self.bshape)) * f for i in range(len(self.bshape)): _X[i] /= f[i] # whiten? if self.cov is not None: _chol = npl.cholesky(self.cov) _X = np.dot(npl.inv(_chol), _X) # compute squared distance D2 = np.sum(_X**2, axis=0) return D2 def __call__(self, X, axis=-1): ''' Compute kernel from points Parameters ---------- X : array array of points axis : int, optional axis containing coordinates. Default -1 ''' _normsq = self._normsq(X, axis) / 2. t = np.less_equal(_normsq, 15) return np.exp(-np.minimum(_normsq, 15)) * t def smooth(self, inimage, clean=False, is_fft=False): """ Apply smoothing to `inimage` Parameters ---------- inimage : ``Image`` The image to be smoothed. Should be 3D. clean : bool, optional Should we call ``nan_to_num`` on the data before smoothing? is_fft : bool, optional Has the data already been fft'd? Returns ------- s_image : `Image` New image, with smoothing applied """ if inimage.ndim == 4: # we need to generalize which axis to iterate over. By # default it should probably be the last. raise NotImplementedError('Smoothing volumes in a 4D series ' 'is broken, pending a rethink') _out = np.zeros(inimage.shape) # iterate over the first (0) axis - this is confusing - see # above nslice = inimage.shape[0] elif inimage.ndim == 3: nslice = 1 else: raise NotImplementedError('expecting either 3 or 4-d image') in_data = inimage.get_fdata() for _slice in range(nslice): if in_data.ndim == 4: data = in_data[_slice] elif in_data.ndim == 3: data = in_data[:] if clean: data = np.nan_to_num(data) if not is_fft: data = self._presmooth(data) data *= self.fkernel data = fft.irfftn(data) / self.norms[self.normalization] gc.collect() _dslice = [slice(0, self.bshape[i], 1) for i in range(3)] if self.scale != 1: data = self.scale * data[_dslice] if self.location != 0.0: data += self.location gc.collect() # Write out data if in_data.ndim == 4: _out[_slice] = data else: _out = data _slice += 1 gc.collect() slicer = tuple( slice(self._kernel.shape[i] // 2, self.bshape[i] + self._kernel.shape[i] // 2) for i in range(len(self.bshape))) _out = _out[slicer] if inimage.ndim == 3: return Image(_out, coordmap=self.coordmap) else: # This does not work as written. 
See above concat_affine = AffineTransform.identity('concat') return Image(_out, coordmap=product(self.coordmap, concat_affine)) def _presmooth(self, indata): slices = [slice(0, self.bshape[i], 1) for i in range(len(self.shape))] _buffer = np.zeros(self.shape) _buffer[tuple(slices)] = indata return fft.rfftn(_buffer) def fwhm2sigma(fwhm): """ Convert a FWHM value to sigma in a Gaussian kernel. Parameters ---------- fwhm : array-like FWHM value or values Returns ------- sigma : array or float sigma values corresponding to `fwhm` values Examples -------- >>> sigma = fwhm2sigma(6) >>> sigmae = fwhm2sigma([6, 7, 8]) >>> sigma == sigmae[0] True """ fwhm = np.asarray(fwhm) return fwhm / np.sqrt(8 * np.log(2)) def sigma2fwhm(sigma): """ Convert a sigma in a Gaussian kernel to a FWHM value Parameters ---------- sigma : array-like sigma value or values Returns ------- fwhm : array or float fwhm values corresponding to `sigma` values Examples -------- >>> fwhm = sigma2fwhm(3) >>> fwhms = sigma2fwhm([3, 4, 5]) >>> fwhm == fwhms[0] True """ sigma = np.asarray(sigma) return sigma * np.sqrt(8 * np.log(2)) def _crop(X, tol=1.0e-10): """ Find a bounding box for support of fabs(X) > tol and returned crop region. """ aX = np.fabs(X) n = len(X.shape) I = np.indices(X.shape)[:, np.greater(aX, tol)] if I.shape[1] > 0: m = [I[i].min() for i in range(n)] M = [I[i].max() for i in range(n)] slices = [slice(m[i], M[i]+1, 1) for i in range(n)] return X[tuple(slices)] else: return np.zeros((1,)*n) nipy-0.6.1/nipy/algorithms/meson.build000066400000000000000000000010221470056100100177540ustar00rootroot00000000000000target_dir = 'nipy/algorithms' python_sources = [ '__init__.py', 'fwhm.py', 'interpolation.py', 'kernel_smooth.py', 'optimize.py', 'resample.py', ] py.install_sources( python_sources, pure: false, subdir: target_dir ) pure_subdirs = [ 'clustering', 'diagnostics', 'group', 'slicetiming', 'tests', 'utils' ] foreach subdir: pure_subdirs install_subdir(subdir, install_dir: install_root / target_dir) endforeach subdir('graph') subdir('registration') subdir('segmentation') subdir('statistics') nipy-0.6.1/nipy/algorithms/optimize.py000066400000000000000000000055331470056100100200370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # add-ons to scipy.optimize import numpy as np from scipy.optimize import approx_fprime, brent def _linesearch_brent(func, p, xi, tol=1e-3): """Line-search algorithm using Brent's method. Find the minimum of the function ``func(x0+ alpha*direc)``. """ def myfunc(alpha): return func(p + alpha * xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) xi = alpha_min*xi return np.squeeze(fret), p+xi def _wrap(function, args): ncalls = [0] def wrapper(x): ncalls[0] += 1 return function(x, *args) return ncalls, wrapper def fmin_steepest(f, x0, fprime=None, xtol=1e-4, ftol=1e-4, maxiter=None, epsilon=1.4901161193847656e-08, callback=None, disp=True): """ Minimize a function using a steepest gradient descent algorithm. This complements the collection of minimization routines provided in scipy.optimize. Steepest gradient iterations are cheaper than in the conjugate gradient or Newton methods, hence convergence may sometimes turn out faster algthough more iterations are typically needed. 
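    For instance, on a toy quadratic (a hedged sketch; convergence
    values vary with the tolerances, so the doctest is skipped):

    >>> import numpy as np
    >>> f = lambda x: np.sum((x - 1.) ** 2)
    >>> x = fmin_steepest(f, np.zeros(3), disp=False)  # doctest: +SKIP
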
    Parameters
    ----------
    f : callable
        Function to be minimized
    x0 : array
        Starting point
    fprime : callable
        Function that computes the gradient of f
    xtol : float
        Relative tolerance on step sizes in line searches
    ftol : float
        Relative tolerance on function variations
    maxiter : int
        Maximum number of iterations
    epsilon : float or ndarray
        If fprime is approximated, use this value for the step size
        (can be scalar or vector).
    callback : callable
        Optional function called after each iteration is complete
    disp : bool
        Print convergence message if True

    Returns
    -------
    x : array
        Gradient descent fixed point, local minimizer of f
    """
    x = np.asarray(x0).flatten()
    fval = np.squeeze(f(x))
    it = 0
    if maxiter is None:
        maxiter = x.size * 1000
    if fprime is None:
        grad_calls, myfprime = _wrap(approx_fprime, (f, epsilon))
    else:
        # NB: this previously read `_wrap(fprime, args)`, but no `args`
        # variable exists in this scope, so supplying `fprime` raised a
        # NameError; wrap with an empty extra-argument tuple instead.
        grad_calls, myfprime = _wrap(fprime, ())
    while it < maxiter:
        it = it + 1
        x0 = x
        fval0 = fval
        if disp:
            print('Computing gradient...')
        direc = myfprime(x)
        direc = direc / np.sqrt(np.sum(direc**2))
        if disp:
            print('Performing line search...')
        fval, x = _linesearch_brent(f, x, direc, tol=xtol)
        if callback is not None:
            callback(x)
        if (2.0*(fval0-fval) <= ftol*(abs(fval0)+abs(fval))+1e-20):
            break
    if disp:
        print('Number of iterations: %d' % it)
        print(f'Minimum criterion value: {fval:f}')
    return x
nipy-0.6.1/nipy/algorithms/registration/000077500000000000000000000000001470056100100203315ustar00rootroot00000000000000
nipy-0.6.1/nipy/algorithms/registration/NOTES_ELF000066400000000000000000000020721470056100100216330ustar00rootroot00000000000000
Notes neurospin/registration

registration/
  __init__.py
  registration.py

iconic_registration (intensity based, joint histogram)
  renamed joint registration
  takes 'from' and 'to' images and computes a joint histogram

groupwise_registration.py (motion correction in fmri)
  register a set of images
  sum of square differences
  not using joint histogram

affine.py (describes a general 3d affine transformation and its
parametrization)
  class affine
  params=s(-1)xv12
  s: pre_cond
  radius for the preconditioner is in translation coordinates
  check for rigidity
  class

grid_transform.py (discrete displacements of the from grid)

cubic_spline.c (same results as ndimage)

wichmann_prng.c (only for the random interpolation)

iconic.c
  to be renamed to histogram.c
  interpolating the histogram avoids the problem of casting the
  intensity in C
  assumes the joint histogram is a signed short array (16bit)
  clamp

Make independent tests with checks starting from different
registrations.
Sensible default for the focus function
What should we do when outside the fov?
nipy-0.6.1/nipy/algorithms/registration/TODO.txt000066400000000000000000000014211470056100100216350ustar00rootroot00000000000000
* 'permuted' svd in affine.py
* rename rotation, scaling, shearing appropriately
* spline transform object
* log-euclidean transform object ???
* Levenberg-Marquardt
* Affine transform creation

--------------------------------------------

Transform objects

Transform
 | --> Affine
 |      | --> Rigid, Similarity, ...
 | --> GridTransform
 | --> SplineTransform
 | --> PolyAffine
 |      | --> PolyRigid, PolySimilarity, ...

ChainTransform

Any registration method should take a generic transform argument
having an `apply` method and a `param` attribute or property.

Internally, it may create a ChainTransform object to represent
voxel-to-voxel transforms or other kinds of compositions. The
transform supplied by the user should be optimizable (have a
`param` attribute).
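
A minimal sketch of that interface using the existing classes
(illustration only):

    >>> import numpy as np
    >>> from nipy.algorithms.registration import Rigid
    >>> from nipy.algorithms.registration.chain_transform import ChainTransform
    >>> chain = ChainTransform(Rigid(), pre=np.eye(4), post=np.eye(4))
    >>> chain.param = np.zeros(6)  # forwarded to the Rigid transform
    >>> pts = chain.apply(np.zeros((5, 3)))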
nipy-0.6.1/nipy/algorithms/registration/__init__.py000066400000000000000000000015301470056100100224410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .affine import ( Affine, Affine2D, Rigid, Rigid2D, Similarity, Similarity2D, affine_transforms, inverse_affine, preconditioner, rotation_mat2vec, rotation_vec2mat, subgrid_affine, threshold, to_matrix44, ) from .groupwise_registration import ( FmriRealign4d, Image4d, Realign4d, Realign4dAlgorithm, SpaceTimeRealign, adjust_subsampling, interp_slice_times, make_grid, realign4d, resample4d, scanner_coords, single_run_realign4d, ) from .histogram_registration import ( HistogramRegistration, clamp, ideal_spacing, interp_methods, ) from .resample import resample from .scripting import aff2euler, space_time_realign nipy-0.6.1/nipy/algorithms/registration/_registration.h000066400000000000000000000000671470056100100233560ustar00rootroot00000000000000#define PY_ARRAY_UNIQUE_SYMBOL _registration_ARRAY_API nipy-0.6.1/nipy/algorithms/registration/_registration.pyx000066400000000000000000000176071470056100100237570ustar00rootroot00000000000000# -*- Mode: Python -*- """ Bindings for various image registration routines written in C: joint histogram computation, cubic spline interpolation, non-rigid transformations. """ __version__ = '0.3' # Set symbol for array_import; must come before cimport numpy cdef extern from "_registration.h": int PY_ARRAY_UNIQUE_SYMBOL # Includes from numpy cimport (import_array, ndarray, flatiter, broadcast, PyArray_MultiIterNew, PyArray_MultiIter_DATA, PyArray_MultiIter_NEXT) cdef extern from "joint_histogram.h": int joint_histogram(ndarray H, unsigned int clampI, unsigned int clampJ, flatiter iterI, ndarray imJ_padded, ndarray Tvox, int interp) int L1_moments(double* n, double* median, double* dev, ndarray H) cdef extern from "cubic_spline.h": void cubic_spline_transform(ndarray res, ndarray src) double cubic_spline_sample1d(double x, ndarray coef, int mode) double cubic_spline_sample2d(double x, double y, ndarray coef, int mode_x, int mode_y) double cubic_spline_sample3d(double x, double y, double z, ndarray coef, int mode_x, int mode_y, int mode_z) double cubic_spline_sample4d(double x, double y, double z, double t, ndarray coef, int mode_x, int mode_y, int mode_z, int mode_t) void cubic_spline_resample3d(ndarray im_resampled, ndarray im, double* Tvox, int mode_x, int mode_y, int mode_z) cdef extern from "polyaffine.h": void apply_polyaffine(ndarray XYZ, ndarray Centers, ndarray Affines, ndarray Sigma) # Initialize numpy import_array() import numpy as np # Globals modes = {'zero': 0, 'nearest': 1, 'reflect': 2} def _joint_histogram(ndarray H, flatiter iterI, ndarray imJ, ndarray Tvox, long interp): """ Compute the joint histogram given a transformation trial. """ cdef: double *h double *tvox unsigned int clampI unsigned int clampJ int ret # Views clampI = H.shape[0] clampJ = H.shape[1] # Compute joint histogram ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) if not ret == 0: raise RuntimeError('Joint histogram failed because of incorrect input arrays.') return def _L1_moments(ndarray H): """ Compute L1 moments of order 0, 1 and 2 of a one-dimensional histogram. 
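    Returned in this order: `n`, the total mass of the histogram;
    `median`, its L1 center; and `dev`, a measure of L1 dispersion
    about the median. A rough call sketch (`H` must be a double array;
    skipped under doctest):

    >>> H = np.array([1., 2., 1.])
    >>> n, median, dev = _L1_moments(H)  # doctest: +SKIP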
""" cdef: double n[1] double median[1] double dev[1] int ret ret = L1_moments(n, median, dev, H) if not ret == 0: raise RuntimeError('L1_moments failed because input array is not double.') return n[0], median[0], dev[0] def _cspline_transform(ndarray x): c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) cubic_spline_transform(c, x) return c cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): shape = [sh_arr.shape[i] for i in range(sh_arr.ndim)] return np.reshape(in_arr, shape).astype(np.double) def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): cdef: double *r double *x broadcast multi Xa = _reshaped_double(X, R) multi = PyArray_MultiIterNew(2, R, Xa) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, mx='zero', my='zero'): cdef: double *r double *x double *y broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) multi = PyArray_MultiIterNew(3, R, Xa, Ya) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, mx='zero', my='zero', mz='zero'): cdef: double *r double *x double *y double *z broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) Za = _reshaped_double(Z, R) multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) z = PyArray_MultiIter_DATA(multi, 3) r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample4d(ndarray R, ndarray C, X=0, Y=0, Z=0, T=0, mx='zero', my='zero', mz='zero', mt='zero'): """ In-place cubic spline sampling. R.dtype must be 'double'. """ cdef: double *r double *x double *y double *z double *t broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) Za = _reshaped_double(Z, R) Ta = _reshaped_double(T, R) multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) z = PyArray_MultiIter_DATA(multi, 3) t = PyArray_MultiIter_DATA(multi, 4) r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) PyArray_MultiIter_NEXT(multi) return R def _cspline_resample3d(ndarray im_resampled, ndarray im, dims, ndarray Tvox, mx='zero', my='zero', mz='zero'): """ Perform cubic spline resampling of a 3d input image `im` into a grid with shape `dims` according to an affine transform represented by a 4x4 matrix `Tvox` that assumes voxel coordinates. Boundary conditions on each axis are determined by the keyword arguments `mx`, `my` and `mz`, respectively. Possible choices are: 'zero': assume zero intensity outside the target grid 'nearest': extrapolate intensity by the closest grid point along the axis 'reflect': extrapolate intensity by mirroring the input image along the axis Note that `Tvox` will be re-ordered in C convention if needed. 
""" cdef double *tvox # Ensure that the Tvox array is C-contiguous (required by the # underlying C routine) Tvox = np.asarray(Tvox, dtype='double', order='C') tvox = Tvox.data # Actual resampling cubic_spline_resample3d(im_resampled, im, tvox, modes[mx], modes[my], modes[mz]) return im_resampled def check_array(ndarray x, int dim, int exp_dim, xname): if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': raise ValueError('%s array should be double C-contiguous' % xname) if not dim == exp_dim: raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) def _apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): check_array(xyz, xyz.shape[1], 3, 'xyz') check_array(centers, centers.shape[1], 3, 'centers') check_array(affines, affines.shape[1], 12, 'affines') check_array(sigma, sigma.size, 3, 'sigma') if not centers.shape[0] == affines.shape[0]: raise ValueError('centers and affines arrays should have same shape[0]') apply_polyaffine(xyz, centers, affines, sigma) nipy-0.6.1/nipy/algorithms/registration/affine.py000066400000000000000000000325771470056100100221510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.linalg as spl from nibabel.affines import apply_affine from transforms3d.quaternions import mat2quat, quat2axangle # Legacy repr printing from numpy. from .transform import Transform # Globals RADIUS = 100 MAX_ANGLE = 1e10 * 2 * np.pi SMALL_ANGLE = 1e-30 MAX_DIST = 1e10 LOG_MAX_DIST = np.log(MAX_DIST) TINY = float(np.finfo(np.double).tiny) def threshold(x, th): return np.maximum(np.minimum(x, th), -th) def rotation_mat2vec(R): """ Rotation vector from rotation matrix `R` Parameters ---------- R : (3,3) array-like Rotation matrix Returns ------- vec : (3,) array Rotation vector, where norm of `vec` is the angle ``theta``, and the axis of rotation is given by ``vec / theta`` """ ax, angle = quat2axangle(mat2quat(R)) return ax * angle def rotation_vec2mat(r): """ R = rotation_vec2mat(r) The rotation matrix is given by the Rodrigues formula: R = Id + sin(theta)*Sn + (1-cos(theta))*Sn^2 with: 0 -nz ny Sn = nz 0 -nx -ny nx 0 where n = r / ||r|| In case the angle ||r|| is very small, the above formula may lead to numerical instabilities. We instead use a Taylor expansion around theta=0: R = I + sin(theta)/tetha Sr + (1-cos(theta))/teta2 Sr^2 leading to: R = I + (1-theta2/6)*Sr + (1/2-theta2/24)*Sr^2 To avoid numerical instabilities, an upper threshold is applied to the angle. It is chosen to be a multiple of 2*pi, hence the resulting rotation is then the identity matrix. This strategy warrants that the output matrix is a continuous function of the input vector. """ theta = np.sqrt(np.sum(r ** 2)) if theta > MAX_ANGLE: return np.eye(3) elif theta > SMALL_ANGLE: n = r / theta Sn = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]]) R = np.eye(3) + np.sin(theta) * Sn\ + (1 - np.cos(theta)) * np.dot(Sn, Sn) else: Sr = np.array([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]]) theta2 = theta * theta R = np.eye(3) + (1 - theta2 / 6.) * Sr\ + (.5 - theta2 / 24.) * np.dot(Sr, Sr) return R def to_matrix44(t, dtype=np.double): """ T = to_matrix44(t) t is a vector of affine transformation parameters with size at least 6. 
    size < 6      ==> error
    size == 6     ==> t is interpreted as translation + rotation
    size == 7     ==> t is interpreted as translation + rotation +
                      isotropic scaling
    7 < size < 12 ==> error
    size >= 12    ==> t is interpreted as translation + rotation +
                      scaling + pre-rotation
    """
    size = t.size
    T = np.eye(4, dtype=dtype)
    R = rotation_vec2mat(t[3:6])
    if size == 6:
        T[0:3, 0:3] = R
    elif size == 7:
        T[0:3, 0:3] = t[6] * R
    else:
        S = np.diag(np.exp(threshold(t[6:9], LOG_MAX_DIST)))
        Q = rotation_vec2mat(t[9:12])
        # Beware: R*s*Q
        T[0:3, 0:3] = np.dot(R, np.dot(S, Q))
    T[0:3, 3] = threshold(t[0:3], MAX_DIST)
    return T


def preconditioner(radius):
    """
    Computes a scaling vector pc such that, if p=(u,r,s,q) represents
    affine transformation parameters, where u is a translation, r and q
    are rotation vectors, and s is the vector of log-scales, then all
    components of (p/pc) are roughly comparable to the translation
    component.

    To that end, we use a `radius` parameter which represents the
    'typical size' of the object being registered. This is used to
    reformat the parameter vector
    (translation+rotation+scaling+pre-rotation) so that each element
    roughly represents a variation in mm.
    """
    rad = 1. / radius
    sca = 1. / radius
    return np.array([1, 1, 1, rad, rad, rad, sca, sca, sca, rad, rad, rad])


def inverse_affine(affine):
    return spl.inv(affine)


def slices2aff(slices):
    """
    Return affine from start, step of sequence `slices` of slice objects

    Parameters
    ----------
    slices : sequence of slice objects

    Returns
    -------
    aff : ndarray
        If ``N = len(slices)`` then affine is shape (N+1, N+1) with
        diagonal given by the ``step`` attribute of the slice objects
        (where None corresponds to 1), and the `:N` elements in the
        last column are given by the ``start`` attribute of the slice
        objects

    Examples
    --------
    >>> slices2aff([slice(None), slice(None)])
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    >>> slices2aff([slice(2, 3, 4), slice(3, 4, 5), slice(4, 5, 6)])
    array([[ 4.,  0.,  0.,  2.],
           [ 0.,  5.,  0.,  3.],
           [ 0.,  0.,  6.,  4.],
           [ 0.,  0.,  0.,  1.]])
    """
    starts = [s.start if s.start is not None else 0 for s in slices]
    steps = [s.step if s.step is not None else 1 for s in slices]
    aff = np.diag(steps + [1.])
    aff[:-1, -1] = starts
    return aff


def subgrid_affine(affine, slices):
    """
    Return dot product of `affine` and affine resulting from `slices`

    Parameters
    ----------
    affine : array-like
        Affine to apply on right of affine resulting from `slices`
    slices : sequence of slice objects
        Slices generating (N+1, N+1) affine from ``slices2aff``, where
        ``N = len(slices)``

    Returns
    -------
    aff : ndarray
        result of ``np.dot(affine, slice_affine)`` where
        ``slice_affine`` is affine resulting from ``slices2aff(slices)``.

    Raises
    ------
    ValueError : if the ``slice_affine`` contains non-integer values
    """
    slices_aff = slices2aff(slices)
    if not np.all(slices_aff == np.round(slices_aff)):
        raise ValueError("Need integer slice start, step")
    return np.dot(affine, slices_aff)


class Affine(Transform):
    param_inds = list(range(12))

    def __init__(self, array=None, radius=RADIUS):
        self._direct = True
        self._precond = preconditioner(radius)
        if array is None:
            self._vec12 = np.zeros(12)
            return
        array = np.array(array)
        if array.size == 12:
            self._vec12 = array.ravel().copy()
        elif array.shape == (4, 4):
            self.from_matrix44(array)
        else:
            raise ValueError('Invalid array')

    def copy(self):
        new = self.__class__()
        new._direct = self._direct
        new._precond[:] = self._precond[:]
        new._vec12 = self._vec12.copy()
        return new

    def from_matrix44(self, aff):
        """
        Convert a 4x4 matrix describing an affine transform into a
        12-sized vector of natural affine parameters: translation,
        rotation, log-scale, pre-rotation (to allow for shearing when
        combined with non-unitary scales). In case the transform has a
        negative determinant, set the `_direct` attribute to False.
        """
        vec12 = np.zeros((12,))
        vec12[0:3] = aff[:3, 3]
        # Use SVD to find orthogonal and diagonal matrices such that
        # aff[0:3,0:3] == R*S*Q
        R, s, Q = spl.svd(aff[0:3, 0:3])
        if spl.det(R) < 0:
            R = -R
            Q = -Q
        r = rotation_mat2vec(R)
        if spl.det(Q) < 0:
            Q = -Q
            self._direct = False
        q = rotation_mat2vec(Q)
        vec12[3:6] = r
        vec12[6:9] = np.log(np.maximum(s, TINY))
        vec12[9:12] = q
        self._vec12 = vec12

    def apply(self, xyz):
        return apply_affine(self.as_affine(), xyz)

    def _get_param(self):
        param = self._vec12 / self._precond
        return param[self.param_inds]

    def _set_param(self, p):
        p = np.asarray(p)
        inds = self.param_inds
        self._vec12[inds] = p * self._precond[inds]

    def _get_translation(self):
        return self._vec12[0:3]

    def _set_translation(self, x):
        self._vec12[0:3] = x

    def _get_rotation(self):
        return self._vec12[3:6]

    def _set_rotation(self, x):
        self._vec12[3:6] = x

    def _get_scaling(self):
        return np.exp(self._vec12[6:9])

    def _set_scaling(self, x):
        self._vec12[6:9] = np.log(x)

    def _get_pre_rotation(self):
        return self._vec12[9:12]

    def _set_pre_rotation(self, x):
        self._vec12[9:12] = x

    def _get_direct(self):
        return self._direct

    def _get_precond(self):
        return self._precond

    translation = property(_get_translation, _set_translation)
    rotation = property(_get_rotation, _set_rotation)
    scaling = property(_get_scaling, _set_scaling)
    pre_rotation = property(_get_pre_rotation, _set_pre_rotation)
    is_direct = property(_get_direct)
    precond = property(_get_precond)
    param = property(_get_param, _set_param)

    def as_affine(self, dtype='double'):
        T = to_matrix44(self._vec12, dtype=dtype)
        if not self._direct:
            T[:3, :3] *= -1
        return T

    def compose(self, other):
        """
        Compose this transform onto another

        Parameters
        ----------
        other : Transform
            transform that we compose onto

        Returns
        -------
        composed_transform : Transform
            a transform implementing the composition of self on `other`
        """
        # If other is not an Affine, use either its left compose
        # method, if available, or the generic compose method
        if not hasattr(other, 'as_affine'):
            if hasattr(other, 'left_compose'):
                return other.left_compose(self)
            else:
                return Transform(self.apply).compose(other)

        # Affine case: choose the more capable of the two input types
        # as the output type
        other_aff = other.as_affine()
        self_inds = set(self.param_inds)
        other_inds = set(other.param_inds)
        if self_inds.issubset(other_inds):
            klass = other.__class__
        # NB: this previously read `other_inds.isssubset(self_inds)`
        # (three 's'), which raised AttributeError whenever reached.
        elif other_inds.issubset(self_inds):
            klass = self.__class__
        else:
            # neither one
contains capabilities of the other klass = Affine a = klass() a._precond[:] = self._precond[:] a.from_matrix44(np.dot(self.as_affine(), other_aff)) return a def __str__(self): string = f'translation : {self.translation}\n' string += f'rotation : {self.rotation}\n' string += f'scaling : {self.scaling}\n' string += f'pre-rotation: {self.pre_rotation}' return string def inv(self): """ Return the inverse affine transform. """ a = self.__class__() a._precond[:] = self._precond[:] a.from_matrix44(spl.inv(self.as_affine())) return a class Affine2D(Affine): param_inds = [0, 1, 5, 6, 7, 11] class Rigid(Affine): param_inds = list(range(6)) def from_matrix44(self, aff): """ Convert a 4x4 matrix describing a rigid transform into a 12-sized vector of natural affine parameters: translation, rotation, log-scale, pre-rotation (to allow for pre-rotation when combined with non-unitary scales). In case the transform has a negative determinant, set the `_direct` attribute to False. """ vec12 = np.zeros((12,)) vec12[:3] = aff[:3, 3] R = aff[:3, :3] if spl.det(R) < 0: R = -R self._direct = False vec12[3:6] = rotation_mat2vec(R) vec12[6:9] = 0.0 self._vec12 = vec12 def __str__(self): string = f'translation : {self.translation}\n' string += f'rotation : {self.rotation}\n' return string class Rigid2D(Rigid): param_inds = [0, 1, 5] class Similarity(Affine): param_inds = list(range(7)) def from_matrix44(self, aff): """ Convert a 4x4 matrix describing a similarity transform into a 12-sized vector of natural affine parameters: translation, rotation, log-scale, pre-rotation (to allow for pre-rotation when combined with non-unitary scales). In case the transform has a negative determinant, set the `_direct` attribute to False. """ vec12 = np.zeros((12,)) vec12[:3] = aff[:3, 3] ## A = s R ==> det A = (s)**3 ==> s = (det A)**(1/3) A = aff[:3, :3] detA = spl.det(A) s = np.maximum(np.abs(detA) ** (1 / 3.), TINY) if detA < 0: A = -A self._direct = False vec12[3:6] = rotation_mat2vec(A / s) vec12[6:9] = np.log(s) self._vec12 = vec12 def _set_param(self, p): p = np.asarray(p) self._vec12[list(range(9))] =\ (p[[0, 1, 2, 3, 4, 5, 6, 6, 6]] * self._precond[list(range(9))]) param = property(Affine._get_param, _set_param) def __str__(self): string = f'translation : {self.translation}\n' string += f'rotation : {self.rotation}\n' string += f'scaling : {self.scaling[0]}\n' return string class Similarity2D(Similarity): param_inds = [0, 1, 5, 6] def _set_param(self, p): p = np.asarray(p) self._vec12[[0, 1, 5, 6, 7, 8]] =\ (p[[0, 1, 2, 3, 3, 3]] * self._precond[[0, 1, 5, 6, 7, 8]]) param = property(Similarity._get_param, _set_param) affine_transforms = {'affine': Affine, 'affine2d': Affine2D, 'similarity': Similarity, 'similarity2d': Similarity2D, 'rigid': Rigid, 'rigid2d': Rigid2D} nipy-0.6.1/nipy/algorithms/registration/chain_transform.py000066400000000000000000000036751470056100100240730ustar00rootroot00000000000000""" Chain transforms """ from .affine import Affine class ChainTransform: def __init__(self, optimizable, pre=None, post=None): """ Create chain transform instance Parameters ---------- optimizable : array or Transform Transform that we are optimizing. If this is an array, then assume it's an affine matrix. pre : None or array or Transform, optional If not None, a transform that should be applied to points before applying the `optimizable` transform. If an array, then assume it's an affine matrix. 
post : None or Transform, optional If not None, a transform that should be applied to points after applying any `pre` transform, and then the `optimizable` transform. If an array, assume it's an affine matrix """ if not hasattr(optimizable, 'param'): raise ValueError('Input transform should be optimizable') if not hasattr(optimizable, 'apply'): optimizable = Affine(optimizable) if not hasattr(pre, 'apply'): pre = Affine(pre) if not hasattr(post, 'apply'): post = Affine(post) self.optimizable = optimizable self.pre = pre self.post = post def apply(self, pts): """ Apply full transformation to points `pts` If there are N points, then `pts` will be N by 3 Parameters ---------- pts : array-like array of points Returns ------- transformed_pts : array N by 3 array of transformed points """ composed = self.post.compose(self.optimizable.compose(self.pre)) return composed.apply(pts) def _set_param(self, param): self.optimizable.param = param def _get_param(self): return self.optimizable.param param = property(_get_param, _set_param, None, 'get/set param') nipy-0.6.1/nipy/algorithms/registration/cubic_spline.c000066400000000000000000000476451470056100100231540ustar00rootroot00000000000000#include "cubic_spline.h" #include #include #include /* Useful marcos */ #define ABS(a) ( (a) > 0.0 ? (a) : (-(a)) ) #define FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ? (int)(a)-1 : (int)(a))) #define ROUND(a)(FLOOR(a+0.5)) #ifdef _MSC_VER #define inline __inline #endif /* Three different boundary conditions are implemented: mode == 0 : 'zero' mode == 1: 'nearest' mode == 2: 'reflect' Depending on the mode, the input coordinate x is mirrored so as to fall within the image bounds [0..ddim] and a weight w is computed. */ #define APPLY_BOUNDARY_CONDITIONS(mode, x, w, ddim) \ if (!_apply_boundary_conditions(mode, ddim, &x, &w)) \ return 0.0; #define COMPUTE_NEIGHBORS(x, ddim, nx, px) \ if (!_mirror_grid_neighbors(x, ddim, &nx, &px)) \ return 0.0; /* The following marco forces numpy to consider a PyArrayIterObject non-contiguous. Otherwise, coordinates won't be updated - don't know whether this is a bug or not. */ #define UPDATE_ITERATOR_COORDS(iter) \ iter->contiguous = 0; static void _cubic_spline_transform1d(double* res, double* src, unsigned int dim, unsigned int res_stride, unsigned int src_stride); static void _cubic_spline_transform(PyArrayObject* res, int axis, double* work); static inline void _copy_double_buffer(double* res, double* src, unsigned int dim, unsigned int src_stride); static inline int _mirrored_position(int x, unsigned int ddim); static inline int _apply_boundary_conditions(int mode, unsigned int ddim, double* x, double* w); static inline int _mirror_grid_neighbors(double x, unsigned int ddim, int* nx, int* px); static inline void _apply_affine_transform(double* Tx, double* Ty, double* Tz, const double* Tvox, size_t x, size_t y, size_t z); /* Returns the value of the cubic B-spline function at x */ double cubic_spline_basis (double x) { double y, absx, aux; absx = ABS(x); if (absx >= 2) return 0.0; if (absx < 1) { aux = absx*absx; y = 0.66666666666667 - aux + 0.5*absx*aux; } else { aux = 2 - absx; y = aux*aux*aux / 6.0; } return y; } /* Assumes that src and res are same size and both point to DOUBLE buffers. 
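   (This is the standard two-pass recursive prefilter for cubic
   B-splines: a causal recursion followed by an anti-causal one, with
   pole z1 = -2 + sqrt(3); cf. Unser et al., IEEE Trans. Signal
   Processing, 1993.)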
*/ static void _cubic_spline_transform1d(double* res, double* src, unsigned int dim, unsigned int res_stride, unsigned int src_stride) { int k; double cp, cm, z1_k; double *buf_src, *buf_res; double z1 = -0.26794919243112; /* -2 + sqrt(3) */ double cz1 = 0.28867513459481; /* z1/(z1^2-1) */ /* Initial value for the causal recursion. We use a mirror symmetric boundary condition for the discrete signal, yielding: cp(0) = (1/2-z1^(2N-2)) \sum_{k=0}^{2N-3} s(k) z1^k s(k), where we set: s(N)=s(N-2), s(N+1)=s(N-3), ..., s(2N-3)=s(1). */ buf_src = src; cp = *buf_src; z1_k = 1; for (k=1; k=0; k--) { */ for (k=1; kao, axis); stride = PyArray_STRIDE((PyArrayObject*)iter->ao, axis)/sizeof(double); /* Apply the cubic spline transform along given axis */ while(iter->index < iter->size) { _copy_double_buffer(work, PyArray_ITER_DATA(iter), dim, stride); _cubic_spline_transform1d(PyArray_ITER_DATA(iter), work, dim, stride, 1); PyArray_ITER_NEXT(iter); } /* Free local structures */ Py_DECREF(iter); return; } void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src) { double* work; unsigned int axis, aux=0, dimmax=0; /* Copy src into res */ PyArray_CopyInto(res, (PyArrayObject*)src); /* Compute the maximum array dimension over axes */ for(axis=0; axis dimmax) dimmax = aux; } /* Allocate auxiliary buffer */ work = (double*)malloc(sizeof(double)*dimmax); /* Apply separable cubic spline transforms */ for(axis=0; axisindex < imIter->size) { x = imIter->coordinates[0]; y = imIter->coordinates[1]; z = imIter->coordinates[2]; _apply_affine_transform(&Tx, &Ty, &Tz, Tvox, x, y, z); i1 = cubic_spline_sample3d(Tx, Ty, Tz, im_spline_coeff, mode_x, mode_y, mode_z); /* Copy interpolated value into numpy array */ py_i1 = PyFloat_FromDouble(i1); PyArray_SETITEM(im_resampled, PyArray_ITER_DATA(imIter), py_i1); Py_DECREF(py_i1); /* Increment iterator */ PyArray_ITER_NEXT(imIter); } /* Free memory */ Py_DECREF(imIter); Py_DECREF(im_spline_coeff); return; } static inline void _apply_affine_transform(double* Tx, double* Ty, double* Tz, const double* Tvox, size_t x, size_t y, size_t z) { double* bufTvox = (double*)Tvox; *Tx = (*bufTvox)*x; bufTvox++; *Tx += (*bufTvox)*y; bufTvox++; *Tx += (*bufTvox)*z; bufTvox++; *Tx += *bufTvox; bufTvox++; *Ty = (*bufTvox)*x; bufTvox++; *Ty += (*bufTvox)*y; bufTvox++; *Ty += (*bufTvox)*z; bufTvox++; *Ty += *bufTvox; bufTvox++; *Tz = (*bufTvox)*x; bufTvox++; *Tz += (*bufTvox)*y; bufTvox++; *Tz += (*bufTvox)*z; bufTvox++; *Tz += *bufTvox; return; } /* Convert an input grid coordinate x into another grid coordinate within [0, ddim], possibly using a reflection. This function implicitly assumes that -ddim < x < 2*ddim */ static inline int _mirrored_position(int x, unsigned int ddim) { if (x < 0) return -x; else if (x > ddim) return 2 * ddim - x; else return x; } /* Depending on the chosen mode, mirror the position and set the weight. */ static inline int _apply_boundary_conditions(int mode, unsigned int ddim, double* x, double* w) { int ok = 1; unsigned int dim = ddim + 1; int neg_ddim; unsigned int two_ddim; if (mode == 0) { if (*x < -1) ok = 0; else if (*x < 0) { *w = 1 + *x; *x = 0; } else if (*x > dim) ok = 0; else if (*x > ddim) { *w = dim - *x; *x = ddim; } } else if (mode == 1) { if (*x < 0) *x = 0; else if (*x > ddim) *x = ddim; } else{ /* mode==2 */ neg_ddim = -ddim; two_ddim = 2 * ddim; if ((*x < neg_ddim) || (*x > two_ddim)) ok = 0; } return ok; } /* Compute left and right cubic spline neighbors in the image grid mirrored once on each side. 
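   (The cubic B-spline has support over four grid points, which is why
   the returned neighbors satisfy px - nx == 3.)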
Returns 0 if no neighbor can be found. */ static inline int _mirror_grid_neighbors(double x, unsigned int ddim, int* nx, int* px) { int ok = 0; *px = (int)(x+ddim+2); if ((*px>=3) && (*px<=3*ddim)) { ok = 1; *px = *px-ddim; *nx = *px-3; } return ok; } static inline int _neighbors_zero_outside(double x, unsigned int ddim, int* nx, int* px, double* weight) { int ok = 0, aux; unsigned int dim = ddim+1; *weight = 1; if ((x>-1) && (xdim) { /* ddim<=x /* * Use extension numpy symbol table */ #define NO_IMPORT_ARRAY #include "_registration.h" #include /*! \brief Cubic spline basis function \param x input value */ extern double cubic_spline_basis(double x); /*! \brief Cubic spline transform of a one-dimensional signal \param src input signal \param res output signal (same size) */ extern void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src); extern double cubic_spline_sample1d(double x, const PyArrayObject* coef, int mode); extern double cubic_spline_sample2d(double x, double y, const PyArrayObject* coef, int mode_x, int mode_y); extern double cubic_spline_sample3d(double x, double y, double z, const PyArrayObject* coef, int mode_x, int mode_y, int mode_z); extern double cubic_spline_sample4d(double x, double y, double z, double t, const PyArrayObject* coef, int mode_x, int mode_y, int mode_z, int mode_t); extern void cubic_spline_resample3d(PyArrayObject* im_resampled, const PyArrayObject* im, const double* Tvox, int mode_x, int mode_y, int mode_z); #ifdef __cplusplus } #endif #endif nipy-0.6.1/nipy/algorithms/registration/groupwise_registration.py000066400000000000000000001346641470056100100255370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Motion correction / motion correction with slice timing Routines implementing motion correction and motion correction combined with slice-timing. See: Roche, Alexis (2011) A four-dimensional registration algorithm with application to joint correction of motion and slice timing in fMRI. 
*Medical Imaging, IEEE Transactions on*; 30:1546--1554 """ import os import warnings import numpy as np from nibabel import io_orientation from nibabel.affines import apply_affine from ...core.image.image_spaces import as_xyz_image, make_xyz_image, xyz_affine from ...io.nibcompat import get_header from ..slicetiming import timefuncs from ._registration import _cspline_sample3d, _cspline_sample4d, _cspline_transform from .affine import Affine, Rigid from .optimizer import configure_optimizer, use_derivatives from .type_check import check_type, check_type_and_shape VERBOSE = os.environ.get('NIPY_DEBUG_PRINT', False) INTERLEAVED = None XTOL = 1e-5 FTOL = 1e-5 GTOL = 1e-5 STEPSIZE = 1e-6 SMALL = 1e-20 MAXITER = 64 MAXFUN = None def interp_slice_times(Z, slice_times, tr): Z = np.asarray(Z) nslices = len(slice_times) aux = np.asarray(list(slice_times) + [slice_times[0] + tr]) Zf = np.floor(Z).astype('int') w = Z - Zf Zal = Zf % nslices Za = Zal + w ret = (1 - w) * aux[Zal] + w * aux[Zal + 1] ret += (Z - Za) return ret def scanner_coords(xyz, affine, from_world, to_world): Tv = np.dot(from_world, np.dot(affine, to_world)) XYZ = apply_affine(Tv, xyz) return XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] def make_grid(dims, subsampling=(1, 1, 1), borders=(0, 0, 0)): slices = [slice(b, d - b, s)\ for d, s, b in zip(dims, subsampling, borders)] xyz = np.mgrid[slices] xyz = np.rollaxis(xyz, 0, 4) xyz = np.reshape(xyz, [np.prod(xyz.shape[0:-1]), 3]) return xyz def guess_slice_axis_and_direction(slice_info, affine): if slice_info is None: orient = io_orientation(affine) slice_axis = int(np.where(orient[:, 0] == 2)[0]) slice_direction = int(orient[slice_axis, 1]) else: slice_axis = int(slice_info[0]) slice_direction = int(slice_info[1]) return slice_axis, slice_direction def tr_from_header(images): """ Return the TR from the header of an image or list of images. Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several sessions. Returns ------- float Repetition time, as specified in NIfTI header. Raises ------ ValueError if the TR between the images is inconsistent. """ if not isinstance(images, list): images = [images] images_tr = None for image in images: tr = get_header(image).get_zooms()[3] if images_tr is None: images_tr = tr if tr != images_tr: raise ValueError('TR inconsistent between images.') return images_tr class Image4d: """ Class to represent a sequence of 3d scans (possibly acquired on a slice-by-slice basis). Object remains empty until the data array is actually loaded in memory. Parameters ---------- data : nd array or proxy (function that actually gets the array) """ def __init__(self, data, affine, tr, slice_times, slice_info=None): """ Configure fMRI acquisition time parameters. 
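
        A rough construction sketch with synthetic data (skipped under
        doctest; the slice axis is guessed from the affine, giving 3
        slices here):

        >>> data = np.zeros((2, 2, 3, 5))
        >>> im4d = Image4d(data, np.eye(4), tr=2.0,
        ...                slice_times=(0., 0.5, 1.0))  # doctest: +SKIP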
""" self.affine = np.asarray(affine) self.tr = float(tr) # guess the slice axis and direction (z-axis) self.slice_axis, self.slice_direction =\ guess_slice_axis_and_direction(slice_info, self.affine) # unformatted parameters self._slice_times = slice_times if isinstance(data, np.ndarray): self._data = data self._shape = data.shape self._get_data = None self._init_timing_parameters() else: self._data = None self._shape = None self._get_data = data def _load_data(self): self._data = self._get_data() self._shape = self._data.shape self._init_timing_parameters() def get_fdata(self): if self._data is None: self._load_data() return self._data def get_shape(self): if self._shape is None: self._load_data() return self._shape def _init_timing_parameters(self): # Number of slices nslices = self.get_shape()[self.slice_axis] self.nslices = nslices # Set slice times if isinstance(self._slice_times, (int, float)): # If a single value is provided, assume synchronous slices self.slice_times = np.zeros(nslices) self.slice_times.fill(self._slice_times) else: # Verify correctness of provided slice times if not len(self._slice_times) == nslices: raise ValueError( "Incorrect slice times were provided. There are %d " "slices in the volume, `slice_times` argument has length %d" % (nslices, len(self._slice_times))) self.slice_times = np.asarray(self._slice_times) # Check that slice times are smaller than repetition time if np.max(self.slice_times) > self.tr: raise ValueError("slice times should be smaller than repetition time") def z_to_slice(self, z): """ Account for the fact that slices may be stored in reverse order wrt the scanner coordinate system convention (slice 0 == bottom of the head) """ if self.slice_direction < 0: return self.nslices - 1 - z else: return z def scanner_time(self, zv, t): """ tv = scanner_time(zv, t) zv, tv are grid coordinates; t is an actual time value. 
""" corr = interp_slice_times(self.z_to_slice(zv), self.slice_times, self.tr) return (t - corr) / self.tr def free_data(self): if self._get_data is not None: self._data = None class Realign4dAlgorithm: def __init__(self, im4d, affine_class=Rigid, transforms=None, time_interp=True, subsampling=(1, 1, 1), refscan=0, borders=(1, 1, 1), optimizer='ncg', optimize_template=True, xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN): # Check arguments check_type_and_shape(subsampling, int, 3) check_type(refscan, int, accept_none=True) check_type_and_shape(borders, int, 3) check_type(xtol, float) check_type(ftol, float) check_type(gtol, float) check_type(stepsize, float) check_type(maxiter, int) check_type(maxfun, int, accept_none=True) # Get dimensional parameters self.dims = im4d.get_shape() self.nscans = self.dims[3] # Reduce borders if spatial image dimension too small to avoid # getting an empty volume of interest borders = [min(b, d/2 - (not d%2)) for (b, d) in zip(borders, self.dims[0:3])] self.xyz = make_grid(self.dims[0:3], subsampling, borders) masksize = self.xyz.shape[0] self.data = np.zeros([masksize, self.nscans], dtype='double') # Initialize space/time transformation parameters self.affine = im4d.affine self.inv_affine = np.linalg.inv(self.affine) if transforms is None: self.transforms = [affine_class() for scan in range(self.nscans)] else: self.transforms = transforms # Compute the 4d cubic spline transform self.time_interp = time_interp if time_interp: self.timestamps = im4d.tr * np.arange(self.nscans) self.scanner_time = im4d.scanner_time self.cbspline = _cspline_transform(im4d.get_fdata()) else: self.cbspline = np.zeros(self.dims, dtype='double') for t in range(self.dims[3]): self.cbspline[:, :, :, t] =\ _cspline_transform(im4d.get_fdata()[:, :, :, t]) # The reference scan conventionally defines the head # coordinate system self.optimize_template = optimize_template if not optimize_template and refscan is None: self.refscan = 0 else: self.refscan = refscan # Set the minimization method self.set_fmin(optimizer, stepsize, xtol=xtol, ftol=ftol, gtol=gtol, maxiter=maxiter, maxfun=maxfun) # Auxiliary array for realignment estimation self._res = np.zeros(masksize, dtype='double') self._res0 = np.zeros(masksize, dtype='double') self._aux = np.zeros(masksize, dtype='double') self.A = np.zeros((masksize, self.transforms[0].param.size), dtype='double') self._pc = None def resample(self, t): """ Resample a particular time frame on the (sub-sampled) working grid. 
x,y,z,t are "head" grid coordinates X,Y,Z,T are "scanner" grid coordinates """ X, Y, Z = scanner_coords(self.xyz, self.transforms[t].as_affine(), self.inv_affine, self.affine) if self.time_interp: T = self.scanner_time(Z, self.timestamps[t]) _cspline_sample4d(self.data[:, t], self.cbspline, X, Y, Z, T, mx='reflect', my='reflect', mz='reflect', mt='reflect') else: _cspline_sample3d(self.data[:, t], self.cbspline[:, :, :, t], X, Y, Z, mx='reflect', my='reflect', mz='reflect') def resample_full_data(self): if VERBOSE: print('Gridding...') xyz = make_grid(self.dims[0:3]) res = np.zeros(self.dims) for t in range(self.nscans): if VERBOSE: print('Fully resampling scan %d/%d' % (t + 1, self.nscans)) X, Y, Z = scanner_coords(xyz, self.transforms[t].as_affine(), self.inv_affine, self.affine) if self.time_interp: T = self.scanner_time(Z, self.timestamps[t]) _cspline_sample4d(res[:, :, :, t], self.cbspline, X, Y, Z, T, mt='nearest') else: _cspline_sample3d(res[:, :, :, t], self.cbspline[:, :, :, t], X, Y, Z) return res def set_fmin(self, optimizer, stepsize, **kwargs): """ Return the minimization function """ self.stepsize = stepsize self.optimizer = optimizer self.optimizer_kwargs = kwargs self.optimizer_kwargs.setdefault('xtol', XTOL) self.optimizer_kwargs.setdefault('ftol', FTOL) self.optimizer_kwargs.setdefault('gtol', GTOL) self.optimizer_kwargs.setdefault('maxiter', MAXITER) self.optimizer_kwargs.setdefault('maxfun', MAXFUN) self.use_derivatives = use_derivatives(self.optimizer) def init_instant_motion(self, t): """ Pre-compute and cache some constants (at fixed time) for repeated computations of the alignment energy. The idea is to decompose the average temporal variance via: V = (n-1)/n V* + (n-1)/n^2 (x-m*)^2 with x the considered volume at time t, and m* the mean of all resampled volumes but x. Only the second term is variable when one volumes while the others are fixed. A similar decomposition is used for the global variance, so we end up with: V/V0 = [nV* + (x-m*)^2] / [nV0* + (x-m0*)^2] """ fixed = list(range(self.nscans)) fixed.remove(t) aux = self.data[:, fixed] if self.optimize_template: self.mu = np.mean(aux, 1) self.offset = self.nscans * np.mean((aux.T - self.mu) ** 2) self.mu0 = np.mean(aux) self.offset0 = self.nscans * np.mean((aux - self.mu0) ** 2) self._t = t self._pc = None def set_transform(self, t, pc): self.transforms[t].param = pc self.resample(t) def _init_energy(self, pc): if pc is self._pc: return self.set_transform(self._t, pc) self._pc = pc self._res[:] = self.data[:, self._t] - self.mu[:] self._V = np.maximum(self.offset + np.mean(self._res ** 2), SMALL) self._res0[:] = self.data[:, self._t] - self.mu0 self._V0 = np.maximum(self.offset0 + np.mean(self._res0 ** 2), SMALL) if self.use_derivatives: # linearize the data wrt the transform parameters # use the auxiliary array to save the current resampled data self._aux[:] = self.data[:, self._t] basis = np.eye(6) for j in range(pc.size): self.set_transform(self._t, pc + self.stepsize * basis[j]) self.A[:, j] = (self.data[:, self._t] - self._aux)\ / self.stepsize self.transforms[self._t].param = pc self.data[:, self._t] = self._aux[:] # pre-compute gradient and hessian of numerator and # denominator c = 2 / float(self.data.shape[0]) self._dV = c * np.dot(self.A.T, self._res) self._dV0 = c * np.dot(self.A.T, self._res0) self._H = c * np.dot(self.A.T, self.A) def _energy(self): """ The alignment energy is defined as the log-ratio between the average temporal variance in the sequence and the global spatio-temporal variance. 
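        In the notation of `init_instant_motion`, the returned value is
        log(V / V0), computed from the quantities cached by
        `_init_energy`.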
""" return np.log(self._V / self._V0) def _energy_gradient(self): return self._dV / self._V - self._dV0 / self._V0 def _energy_hessian(self): return (1 / self._V - 1 / self._V0) * self._H\ - np.dot(self._dV, self._dV.T) / np.maximum(self._V ** 2, SMALL)\ + np.dot(self._dV0, self._dV0.T) / np.maximum(self._V0 ** 2, SMALL) def estimate_instant_motion(self, t): """ Estimate motion parameters at a particular time. """ if VERBOSE: print('Estimating motion at time frame %d/%d...' % (t + 1, self.nscans)) def f(pc): self._init_energy(pc) return self._energy() def fprime(pc): self._init_energy(pc) return self._energy_gradient() def fhess(pc): self._init_energy(pc) return self._energy_hessian() self.init_instant_motion(t) fmin, args, kwargs =\ configure_optimizer(self.optimizer, fprime=fprime, fhess=fhess, **self.optimizer_kwargs) # With scipy >= 0.9, some scipy minimization functions like # fmin_bfgs may crash due to the subroutine # `scalar_search_armijo` returning None as a stepsize when # unhappy about the objective function. This seems to have the # potential to occur in groupwise registration when using # strong image subsampling, i.e. at the coarser levels of the # multiscale pyramid. To avoid crashes, we insert a try/catch # instruction. try: pc = fmin(f, self.transforms[t].param, disp=VERBOSE, *args, **kwargs) self.set_transform(t, pc) except: warnings.warn('Minimization failed') def estimate_motion(self): """ Optimize motion parameters for the whole sequence. All the time frames are initially resampled according to the current space/time transformation, the parameters of which are further optimized sequentially. """ for t in range(self.nscans): if VERBOSE: print('Resampling scan %d/%d' % (t + 1, self.nscans)) self.resample(t) # Set the template as the reference scan (will be overwritten # if template is to be optimized) if not hasattr(self, 'template'): self.mu = self.data[:, self.refscan].copy() for t in range(self.nscans): self.estimate_instant_motion(t) if VERBOSE: print(self.transforms[t]) def align_to_refscan(self): """ The `motion_estimate` method aligns scans with an online template so that spatial transforms map some average head space to the scanner space. To conventionally redefine the head space as being aligned with some reference scan, we need to right compose each head_average-to-scanner transform with the refscan's 'to head_average' transform. """ if self.refscan is None: return Tref_inv = self.transforms[self.refscan].inv() for t in range(self.nscans): self.transforms[t] = (self.transforms[t]).compose(Tref_inv) def resample4d(im4d, transforms, time_interp=True): """ Resample a 4D image according to the specified sequence of spatial transforms, using either 4D interpolation if `time_interp` is True and 3D interpolation otherwise. """ r = Realign4dAlgorithm(im4d, transforms=transforms, time_interp=time_interp) res = r.resample_full_data() im4d.free_data() return res def adjust_subsampling(speedup, dims): dims = np.array(dims) aux = np.maximum(speedup * dims / np.prod(dims) ** (1 / 3.), [1, 1, 1]) return aux.astype('int') def single_run_realign4d(im4d, affine_class=Rigid, time_interp=True, loops=5, speedup=5, refscan=0, borders=(1, 1, 1), optimizer='ncg', xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN): """ Realign a single run in space and time. 
def single_run_realign4d(im4d, affine_class=Rigid, time_interp=True, loops=5, speedup=5, refscan=0, borders=(1, 1, 1), optimizer='ncg', xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN): """ Realign a single run in space and time. Parameters ---------- im4d : Image4d instance speedup : int or sequence If a sequence, a multi-scale realignment is performed, one pass per element """ if type(loops) not in (list, tuple, np.ndarray): loops = [loops] repeats = len(loops) def format_arg(x): if type(x) not in (list, tuple, np.ndarray): x = [x for i in range(repeats)] else: if not len(x) == repeats: raise ValueError('inconsistent length in arguments') return x speedup = format_arg(speedup) optimizer = format_arg(optimizer) xtol = format_arg(xtol) ftol = format_arg(ftol) gtol = format_arg(gtol) stepsize = format_arg(stepsize) maxiter = format_arg(maxiter) maxfun = format_arg(maxfun) transforms = None opt_params = zip(loops, speedup, optimizer, xtol, ftol, gtol, stepsize, maxiter, maxfun) for loops_, speedup_, optimizer_, xtol_, ftol_, gtol_,\ stepsize_, maxiter_, maxfun_ in opt_params: subsampling = adjust_subsampling(speedup_, im4d.get_shape()[0:3]) r = Realign4dAlgorithm(im4d, transforms=transforms, affine_class=affine_class, time_interp=time_interp, subsampling=subsampling, refscan=refscan, borders=borders, optimizer=optimizer_, xtol=xtol_, ftol=ftol_, gtol=gtol_, stepsize=stepsize_, maxiter=maxiter_, maxfun=maxfun_) for loop in range(loops_): r.estimate_motion() r.align_to_refscan() transforms = r.transforms im4d.free_data() return transforms
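# Example schedule (a sketch; values taken from the multiscale setting # quoted in ``Realign4d.estimate`` below): 5 iterations at subsampling ~5 # followed by a single iteration at subsampling ~2: # transforms = single_run_realign4d(im4d, loops=(5, 1), speedup=(5, 2)) # Scalar arguments (e.g. ``optimizer``) are broadcast to every pass by # ``format_arg`` above.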
def realign4d(runs, affine_class=Rigid, time_interp=True, align_runs=True, loops=5, between_loops=5, speedup=5, refscan=0, borders=(1, 1, 1), optimizer='ncg', xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN): """ Parameters ---------- runs : list of Image4d objects Returns ------- transforms : list nested list of rigid transformations transforms map an 'ideal' 4d grid (conventionally aligned with the first scan of the first run) to the 'acquisition' 4d grid for each run """ # Single-session case if type(runs) not in (list, tuple, np.ndarray): runs = [runs] nruns = len(runs) if nruns == 1: align_runs = False # Correct motion and slice timing in each sequence separately transforms = [single_run_realign4d(run, affine_class=affine_class, time_interp=time_interp, loops=loops, speedup=speedup, refscan=refscan, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun) for run in runs] if not align_runs: return transforms, transforms, None # Correct between-session motion using the mean image of each # corrected run, and creating a fake time series with no temporal # smoothness. If the runs have different affines, a correction is # applied to the transforms associated with each run (except for # the first run) so that all images included in the fake series # have the same affine, namely that of the first run. is_same_affine = lambda a1, a2: np.max(np.abs(a1 - a2)) < 1e-5 mean_img_shape = list(runs[0].get_shape()[0:3]) + [nruns] mean_img_data = np.zeros(mean_img_shape) for i in range(nruns): if is_same_affine(runs[0].affine, runs[i].affine): transforms_i = transforms[i] else: aff_corr = Affine(np.dot(runs[0].affine, np.linalg.inv(runs[i].affine))) transforms_i = [aff_corr.compose(Affine(t.as_affine()))\ for t in transforms[i]] runs[i].affine = runs[0].affine corr_run = resample4d(runs[i], transforms=transforms_i, time_interp=time_interp) mean_img_data[..., i] = corr_run.mean(3) del corr_run mean_img = Image4d(mean_img_data, affine=runs[0].affine, tr=1.0, slice_times=0) transfo_mean = single_run_realign4d(mean_img, affine_class=affine_class, time_interp=False, loops=between_loops, speedup=speedup, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun) # Compose transformations for each run ctransforms = [None for i in range(nruns)] for i in range(nruns): ctransforms[i] = [t.compose(transfo_mean[i]) for t in transforms[i]] return ctransforms, transforms, transfo_mean class Realign4d: def __init__(self, images, tr, slice_times=None, slice_info=None, affine_class=Rigid): """ Spatiotemporal realignment class for series of 3D images. The algorithm performs simultaneous motion and slice timing correction for fMRI series or other data where slices are not acquired simultaneously. Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several sessions. tr : float Inter-scan repetition time, i.e. the time elapsed between two consecutive scans. The unit in which `tr` is given is arbitrary although it needs to be consistent with the `slice_times` argument. slice_times : None or array-like If None, slices are assumed to be acquired simultaneously hence no slice timing correction is performed. If array-like, then the slice acquisition times. slice_info : None or tuple, optional None, or a tuple with slice axis as the first element and direction as the second, for instance (2, 1). If None, then guess the slice axis, and direction, as the closest to the z axis, as estimated from the affine. """ self._init(images, tr, slice_times, slice_info, affine_class) def _init(self, images, tr, slice_times, slice_info, affine_class): """ Generic initialization method. """ if slice_times is None: tr = 1.0 slice_times = 0.0 time_interp = False else: time_interp = True if not isinstance(images, (list, tuple, np.ndarray)): images = [images] if tr is None: raise ValueError('Repetition time cannot be None.') if tr == 0: raise ValueError('Repetition time cannot be zero.') self.affine_class = affine_class self.slice_times = slice_times self.tr = tr self._runs = [] # Note that the affine of each run may be different. This is # the case, for instance, if the subject exits the scanner # in between sessions. for im in images: xyz_img = as_xyz_image(im) self._runs.append(Image4d(xyz_img.get_fdata, xyz_affine(xyz_img), tr, slice_times=slice_times, slice_info=slice_info)) self._transforms = [None for run in self._runs] self._within_run_transforms = [None for run in self._runs] self._mean_transforms = [None for run in self._runs] self._time_interp = time_interp
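# Typical usage (a sketch; ``img4d`` is a hypothetical 4D nipy image # acquired with TR = 2.5 and ascending contiguous slices): # slice_times = 2.5 * np.arange(img4d.shape[2]) / img4d.shape[2] # r = Realign4d(img4d, tr=2.5, slice_times=slice_times, slice_info=(2, 1)) # r.estimate() # corrected = r.resample()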
def estimate(self, loops=5, between_loops=None, align_runs=True, speedup=5, refscan=0, borders=(1, 1, 1), optimizer='ncg', xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN): """Estimate motion parameters. Parameters ---------- loops : int or sequence of ints Determines the number of iterations performed to realign scans within each run for each pass defined by the ``speedup`` argument. For instance, setting ``speedup`` == (5, 2) and ``loops`` == (5, 1) means that 5 iterations are performed in a first pass where scans are subsampled by an isotropic factor 5, followed by one iteration where scans are subsampled by a factor 2. between_loops : None, int or sequence of ints Similar to ``loops`` for between-run motion estimation. Determines the number of iterations used to realign scans across runs, a procedure similar to within-run realignment that uses the mean images from each run. If None, assumed to be the same as ``loops``. The setting used in the experiments described in Roche, IEEE TMI 2011, was: ``speedup`` = (5, 2), ``loops`` = (5, 1) and ``between_loops`` = (5, 1). align_runs : bool Determines whether between-run motion is estimated or not. If False, the ``between_loops`` argument is ignored. speedup : int or sequence of ints Determines an isotropic sub-sampling factor, or a sequence of such factors, applied to the scans to perform motion estimation. If a sequence, several estimation passes are applied. refscan : None or int Defines the number of the scan used as the reference coordinate system for each run. If None, a reference coordinate system is defined internally that does not correspond to any particular scan. Note that the coordinate system associated with the first run is always used as the global reference across runs. borders : sequence of ints Should be of length 3. Determines the field of view for motion estimation in terms of the number of slices at each extremity of the reference grid that are ignored for motion parameter estimation. For instance, ``borders``==(1,1,1) means that the realignment cost function will not take into account voxels located in the first and last axial/sagittal/coronal slices in the reference grid. Please note that this choice only affects parameter estimation but does not affect image resampling in any way, see ``resample`` method. optimizer : str Defines the optimization method. One of 'simplex', 'powell', 'cg', 'ncg', 'bfgs' and 'steepest'. xtol : float Tolerance on variations of transformation parameters to test numerical convergence. ftol : float Tolerance on variations of the intensity comparison metric to test numerical convergence. gtol : float Tolerance on the gradient of the intensity comparison metric to test numerical convergence. Applicable to optimizers 'cg', 'ncg', 'bfgs' and 'steepest'. stepsize : float Step size to approximate the gradient and Hessian of the intensity comparison metric w.r.t. transformation parameters. Applicable to optimizers 'cg', 'ncg', 'bfgs' and 'steepest'. maxiter : int Maximum number of iterations in optimization. maxfun : int Maximum number of function evaluations in the optimization. """ if between_loops is None: between_loops = loops t = realign4d(self._runs, affine_class=self.affine_class, time_interp=self._time_interp, align_runs=align_runs, loops=loops, between_loops=between_loops, speedup=speedup, refscan=refscan, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun) self._transforms, self._within_run_transforms,\ self._mean_transforms = t def resample(self, r=None, align_runs=True): """ Return the resampled run number r as a 4d nipy-like image. Returns all runs as a list of images if r is None.
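For example (sketch): ``realigner.resample()`` returns the full list of motion- and timing-corrected runs, while ``realigner.resample(r=0)`` returns only the first one.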
""" if align_runs: transforms = self._transforms else: transforms = self._within_run_transforms runs = range(len(self._runs)) if r is None: data = [resample4d(self._runs[r], transforms=transforms[r], time_interp=self._time_interp) for r in runs] return [make_xyz_image(data[r], self._runs[r].affine, 'scanner') for r in runs] else: data = resample4d(self._runs[r], transforms=transforms[r], time_interp=self._time_interp) return make_xyz_image(data, self._runs[r].affine, 'scanner') class SpaceTimeRealign(Realign4d): def __init__(self, images, tr, slice_times, slice_info, affine_class=Rigid): """ Spatiotemporal realignment class for fMRI series. This class gives a high-level interface to :class:`Realign4d` Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several fMRI runs. tr : None or float or "header-allow-1.0" Inter-scan repetition time in seconds, i.e. the time elapsed between two consecutive scans. If None, an attempt is made to read the TR from the header, but an exception is thrown for values 0 or 1. A value of "header-allow-1.0" will signal to accept a header TR of 1. slice_times : str or callable or array-like If str, one of the function names in ``SLICETIME_FUNCTIONS`` dictionary from :mod:`nipy.algorithms.slicetiming.timefuncs`. If callable, a function taking two parameters: ``n_slices`` and ``tr`` (number of slices in the images, inter-scan repetition time in seconds). This function returns a vector of times of slice acquisition $t_i$ for each slice $i$ in the volumes. See :mod:`nipy.algorithms.slicetiming.timefuncs` for a collection of functions for common slice acquisition schemes. If array-like, then should be a slice time vector as above. slice_info : int or length 2 sequence If int, the axis in `images` that is the slice axis. In a 4D image, this will often be axis = 2. If a 2 sequence, then elements are ``(slice_axis, slice_direction)``, where ``slice_axis`` is the slice axis in the image as above, and ``slice_direction`` is 1 if the slices were acquired slice 0 first, slice -1 last, or -1 if acquired slice -1 first, slice 0 last. If `slice_info` is an int, assume ``slice_direction`` == 1. affine_class : ``Affine`` class, optional transformation class to use to calculate transformations between the volumes. Default is :class:``Rigid`` """ if tr is None: tr = tr_from_header(images) if tr == 1: raise ValueError('A TR of 1 was found in the header. ' 'This value often stands in for an unknown TR. ' 'Please specify TR explicitly. 
Alternatively ' 'consider setting TR to "header-allow-1.0".') elif tr == "header-allow-1.0": tr = tr_from_header(images) if tr == 0: raise ValueError('Repetition time cannot be zero.') if slice_times is None: raise ValueError("slice_times must be set for space/time " "registration; use SpaceRealign for space-only " "registration") if slice_info is None: raise ValueError("slice_info cannot be None") try: len(slice_info) except TypeError: # Presumably an int slice_axis = slice_info slice_info = (slice_axis, 1) else: # sequence slice_axis, slice_direction = slice_info if type(images) in (list, tuple): n_slices = images[0].shape[slice_axis] else: n_slices = images.shape[slice_axis] if isinstance(slice_times, str): slice_times = timefuncs.SLICETIME_FUNCTIONS[slice_times] if hasattr(slice_times, '__call__'): slice_times = slice_times(n_slices, tr) self._init(images, tr, slice_times, slice_info, affine_class) class SpaceRealign(Realign4d): def __init__(self, images, affine_class=Rigid): """ Spatial registration of time series with no time interpolation Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several fMRI runs. affine_class : ``Affine`` class, optional transformation class to use to calculate transformations between the volumes. Default is :class:``Rigid`` """ self._init(images, 1., None, None, affine_class) class FmriRealign4d(Realign4d): def __init__(self, images, slice_order=None, tr=None, tr_slices=None, start=0.0, interleaved=None, time_interp=None, slice_times=None, affine_class=Rigid, slice_info=None): """ Spatiotemporal realignment class for fMRI series. This class is similar to `Realign4d` but provides a more flexible API for initialization in order to make it easier to declare slice acquisition times for standard sequences. Warning: this class is deprecated; please use :class:`SpaceTimeRealign` instead. Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several fMRI runs. slice_order : str or array-like If str, one of {'ascending', 'descending'}. If array-like, then the order in which the slices were collected in time. For instance, the following represents an ascending contiguous sequence: slice_order = [0, 1, 2, ...] Note that `slice_order` differs from the argument used e.g. in the SPM slice timing routine in that it maps spatial slice positions to slice times. It is a mapping from space to time, while SPM conventionally uses the reverse mapping from time to space. For example, for an interleaved sequence with 10 slices, where we acquired slice 0 (in space) first, then slice 2 (in space) etc, `slice_order` would be [0, 5, 1, 6, 2, 7, 3, 8, 4, 9] Using `slice_order` assumes that the inter-slice acquisition time is constant throughout acquisition. If this is not the case, use the `slice_times` argument instead and leave `slice_order` to None. tr : float Inter-scan repetition time, i.e. the time elapsed between two consecutive scans. The unit in which `tr` is given is arbitrary although it needs to be consistent with the `tr_slices` and `start` arguments if provided. If None, `tr` is computed internally assuming a regular slice acquisition scheme. tr_slices : float Inter-slice repetition time, same as `tr` for slices. If None, acquisition is assumed regular and `tr_slices` is set to `tr` divided by the number of slices. start : float Starting acquisition time (time of the first acquired slice) respective to the time origin for resampling. 
`start` is assumed to be given in the same unit as `tr`. Setting `start=0` means that the resampled data will be synchronous with the first acquired slice. Setting `start=-tr/2` means that the resampled data will be synchronous with the slice acquired at half repetition time. time_interp: bool Tells whether time interpolation is used or not within the realignment algorithm. If False, slices are considered to be acquired all at the same time, thus no slice timing correction will be performed. interleaved : bool Deprecated argument. Tells whether slice acquisition order is interleaved in a certain sense. Setting `interleaved` to True or False will trigger an error unless `slice_order` is 'ascending' or 'descending' and `slice_times` is None. If slice_order=='ascending' and interleaved==True, the assumed slice order is (assuming 10 slices): [0, 5, 1, 6, 2, 7, 3, 8, 4, 9] If slice_order=='descending' and interleaved==True, the assumed slice order is: [9, 4, 8, 3, 7, 2, 6, 1, 5, 0] WARNING: given that there exist other types of interleaved acquisitions depending on scanner settings and manufacturers, you should refrain from using the `interleaved` keyword argument unless you are sure what you are doing. It is generally safer to explicitly input `slice_order` or `slice_times`. slice_times : None, str or array-like This argument can be used instead of `slice_order`, `tr_slices`, `start` and `time_interp` altogether. If None, slices are assumed to be acquired simultaneously hence no slice timing correction is performed. If array-like, then `slice_times` gives the slice acquisition times along the slice axis in units that are consistent with the provided `tr`. Generally speaking, the following holds for sequences with constant inter-slice repetition time `tr_slices`: `slice_times` = `start` + `tr_slices` * `slice_order` For other sequences such as, e.g., sequences with simultaneously acquired slices, it is necessary to input `slice_times` explicitly along with `tr`. slice_info : None or tuple, optional None, or a tuple with slice axis as the first element and direction as the second, for instance (2, 1). If None, then the slice axis and direction are guessed from the first run's affine assuming that slices are collected along the closest axis to the z-axis. This means that we assume by default an axial acquisition with slice axis pointing from bottom to top of the head. """ warnings.warn('Please use SpaceTimeRealign instead of this class; ' 'We will soon remove this class', FutureWarning, stacklevel=2) # if slice_times not None, make sure that parameters redundant # with slice times all have their default value if slice_times is not None: if slice_order is not None \ or tr_slices is not None\ or start != 0.0 \ or time_interp is not None\ or interleaved is not None: raise ValueError('Attempting to set both `slice_times` ' 'and other arguments redundant with it') if tr is None: if len(slice_times) > 1: tr = slice_times[-1] + slice_times[1] - 2 * slice_times[0] else: tr = 2 * slice_times[0] warnings.warn('No `tr` entered. 
Assuming regular acquisition' f' with tr={tr:f}') # case where slice_time is None else: # assume regular slice acquisition, therefore tr is # arbitrary if tr is None: tr = 1.0 # if no slice order provided, assume synchronous slices if slice_order is None: if not time_interp == False: raise ValueError('Slice order is requested ' 'with time interpolation switched on') slice_times = 0.0 else: # if slice_order is a key word, replace it with the # appropriate array of slice indices if slice_order in ('ascending', 'descending'): if isinstance(images, (list, tuple, np.array)): xyz_img = as_xyz_image(images[0]) else: xyz_img = as_xyz_image(images) slice_axis, _ = guess_slice_axis_and_direction( slice_info, xyz_affine(xyz_img)) nslices = xyz_img.shape[slice_axis] if interleaved: warnings.warn('`interleaved` keyword argument is ' 'deprecated', FutureWarning, stacklevel=2) aux = np.argsort(list(range(0, nslices, 2)) + list(range(1, nslices, 2))) else: aux = np.arange(nslices) if slice_order == 'descending': aux = aux[::-1] slice_order = aux # if slice_order is provided explicitly, issue a # warning and make sure interleaved is set to None else: warnings.warn('Please make sure you are NOT using ' 'SPM-style slice order declaration') if interleaved is not None: raise ValueError('`interleaved` should be None when ' 'providing explicit slice order') slice_order = np.asarray(slice_order) if tr_slices is None: tr_slices = float(tr) / float(len(slice_order)) if start is None: start = 0.0 slice_times = start + tr_slices * slice_order self._init(images, tr, slice_times, slice_info, affine_class) nipy-0.6.1/nipy/algorithms/registration/histogram_registration.py000066400000000000000000000522621470056100100255010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Intensity-based image registration """ import numpy as np import scipy.ndimage as nd from ...core.image.image_spaces import as_xyz_image, make_xyz_image, xyz_affine from ._registration import _joint_histogram from .affine import affine_transforms, inverse_affine, subgrid_affine from .chain_transform import ChainTransform from .optimizer import configure_optimizer from .similarity_measures import similarity_measures as _sms MAX_INTC = np.iinfo(np.intc).max # Module globals VERBOSE = True # enables online print statements OPTIMIZER = 'powell' XTOL = 1e-2 FTOL = 1e-2 GTOL = 1e-3 MAXITER = 25 MAXFUN = None CLAMP_DTYPE = 'short' # do not edit NPOINTS = 64 ** 3 # Dictionary of interpolation methods (partial volume, trilinear, # random) interp_methods = {'pv': 0, 'tri': 1, 'rand': -1} class HistogramRegistration: """ A class to represent a generic intensity-based image registration algorithm. """ def __init__(self, from_img, to_img, from_bins=256, to_bins=None, from_mask=None, to_mask=None, similarity='crl1', interp='pv', smooth=0, renormalize=False, dist=None, rng=None): """ Creates a new histogram registration object. Parameters ---------- from_img : nipy-like image `From` image to_img : nipy-like image `To` image from_bins : integer Number of histogram bins to represent the `from` image to_bins : integer Number of histogram bins to represent the `to` image from_mask : array-like Mask to apply to the `from` image to_mask : array-like Mask to apply to the `to` image similarity : str or callable Cost-function for assessing image similarity. 
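# Typical usage (a sketch; ``from_img`` and ``to_img`` are hypothetical # nipy-like images): # R = HistogramRegistration(from_img, to_img, similarity='crl1') # T = R.optimize('rigid') # world-to-world rigid transform # See the ``optimize`` and ``explore`` methods below.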
If a string, one of 'cc': correlation coefficient, 'cr': correlation ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual information, 'nmi': normalized mutual information, 'slr': supervised log-likelihood ratio. If a callable, it should take a two-dimensional array representing the image joint histogram as an input and return a float. dist: None or array-like Joint intensity probability distribution model for use with the 'slr' measure. Should be of shape (from_bins, to_bins). interp : str Interpolation method. One of 'pv': Partial volume, 'tri': Trilinear, 'rand': Random interpolation. See ``joint_histogram.c`` smooth : float Standard deviation in millimeters of an isotropic Gaussian kernel used to smooth the `To` image. If 0, no smoothing is applied. rng : None :class:`numpy.random.Generator` Random number generator. """ # Function assumes xyx_affine for inputs from_img = as_xyz_image(from_img) to_img = as_xyz_image(to_img) # Binning sizes if to_bins is None: to_bins = from_bins # Clamping of the `from` image. The number of bins may be # overridden if unnecessarily large. data, from_bins_adjusted = clamp(from_img.get_fdata(), from_bins, mask=from_mask) if similarity != 'slr': from_bins = from_bins_adjusted self._from_img = make_xyz_image(data, xyz_affine(from_img), 'scanner') # Set field of view in the `from` image with potential # subsampling for faster similarity evaluation. This also sets # the _from_data and _vox_coords attributes if from_mask is None: self.subsample(npoints=NPOINTS) else: corner, size = smallest_bounding_box(from_mask) self.set_fov(corner=corner, size=size, npoints=NPOINTS) # Clamping of the `to` image including padding with -1 self._smooth = float(smooth) if self._smooth < 0: raise ValueError('smoothing kernel cannot have negative scale') elif self._smooth > 0: data = smooth_image(to_img.get_fdata(), xyz_affine(to_img), self._smooth) else: data = to_img.get_fdata() data, to_bins_adjusted = clamp(data, to_bins, mask=to_mask) if similarity != 'slr': to_bins = to_bins_adjusted self._to_data = -np.ones(np.array(to_img.shape) + 2, dtype=CLAMP_DTYPE) self._to_data[1:-1, 1:-1, 1:-1] = data self._to_inv_affine = inverse_affine(xyz_affine(to_img)) # Joint histogram: must be double contiguous as it will be # passed to C routines which assume so self._joint_hist = np.zeros([from_bins, to_bins], dtype='double') # Set default registration parameters self._set_interp(interp) self._set_similarity(similarity, renormalize=renormalize, dist=dist) self.rng = np.random.default_rng() if rng is None else rng def _get_interp(self): return list(interp_methods.keys())[\ list(interp_methods.values()).index(self._interp)] def _set_interp(self, interp): self._interp = interp_methods[interp] interp = property(_get_interp, _set_interp) def _slicer(self, corner, size, spacing): return tuple( slice(int(corner[i]), int(size[i] + corner[i]), int(spacing[i])) for i in range(3)) def set_fov(self, spacing=None, corner=(0, 0, 0), size=None, npoints=None): """ Defines a subset of the `from` image to restrict joint histogram computation. Parameters ---------- spacing : sequence (3,) of positive integers Subsampling of image in voxels, where None (default) results in the subsampling to be automatically adjusted to roughly match a cubic grid with `npoints` voxels corner : sequence (3,) of positive integers Bounding box origin in voxel coordinates size : sequence (3,) of positive integers Desired bounding box size npoints : positive integer Desired number of voxels in the bounding box. 
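For instance, the module default ``NPOINTS = 64 ** 3`` targets a working grid of roughly ``64 ** 3`` voxels.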
If a `spacing` argument is provided, then `npoints` is ignored. """ if spacing is None and npoints is None: spacing = [1, 1, 1] if size is None: size = self._from_img.shape # Adjust spacing to match desired field of view size if spacing is not None: fov_data = self._from_img.get_fdata()[ self._slicer(corner, size, spacing)] else: fov_data = self._from_img.get_fdata()[ self._slicer(corner, size, [1, 1, 1])] spacing = ideal_spacing(fov_data, npoints=npoints) fov_data = self._from_img.get_fdata()[ self._slicer(corner, size, spacing)] self._from_data = fov_data self._from_npoints = (fov_data >= 0).sum() self._from_affine = subgrid_affine(xyz_affine(self._from_img), self._slicer(corner, size, spacing)) # We cache the voxel coordinates of the clamped image self._vox_coords =\ np.indices(self._from_data.shape).transpose((1, 2, 3, 0)) def subsample(self, spacing=None, npoints=None): self.set_fov(spacing=spacing, npoints=npoints) def _set_similarity(self, similarity, renormalize=False, dist=None): if similarity in _sms: if similarity == 'slr': if dist is None: raise ValueError('slr measure requires a joint intensity distribution model, ' 'see `dist` argument of HistogramRegistration') if dist.shape != self._joint_hist.shape: raise ValueError('Wrong shape for the `dist` argument') self._similarity = similarity self._similarity_call =\ _sms[similarity](self._joint_hist.shape, renormalize, dist) else: if not hasattr(similarity, '__call__'): raise ValueError('similarity should be callable') self._similarity = 'custom' self._similarity_call = similarity def _get_similarity(self): return self._similarity similarity = property(_get_similarity, _set_similarity) def eval(self, T): """ Evaluate similarity function given a world-to-world transform. Parameters ---------- T : Transform Transform object implementing ``apply`` method """ Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine) return self._eval(Tv) def eval_gradient(self, T, epsilon=1e-1): """ Evaluate the gradient of the similarity function wrt transformation parameters. The gradient is approximated using central finite differences at the transformation specified by `T`. The input transformation object `T` is modified in place unless it has a ``copy`` method. Parameters ---------- T : Transform Transform object implementing ``apply`` method epsilon : float Step size for finite differences in units of the transformation parameters Returns ------- g : ndarray Similarity gradient estimate """ param0 = T.param.copy() if hasattr(T, 'copy'): T = T.copy() def simi(param): T.param = param return self.eval(T) return approx_gradient(simi, param0, epsilon) def eval_hessian(self, T, epsilon=1e-1, diag=False): """ Evaluate the Hessian of the similarity function wrt transformation parameters. The Hessian or its diagonal is approximated at the transformation specified by `T` using central finite differences. The input transformation object `T` is modified in place unless it has a ``copy`` method. Parameters ---------- T : Transform Transform object implementing ``apply`` method epsilon : float Step size for finite differences in units of the transformation parameters diag : bool If True, approximate the Hessian by a diagonal matrix. 
Returns ------- H : ndarray Similarity Hessian matrix estimate """ param0 = T.param.copy() if hasattr(T, 'copy'): T = T.copy() def simi(param): T.param = param return self.eval(T) if diag: return np.diag(approx_hessian_diag(simi, param0, epsilon)) else: return approx_hessian(simi, param0, epsilon) def _eval(self, Tv): """ Evaluate similarity function given a voxel-to-voxel transform. Parameters ---------- Tv : Transform Transform object implementing ``apply`` method Should map voxel space to voxel space """ # trans_vox_coords needs be C-contiguous trans_vox_coords = Tv.apply(self._vox_coords) interp = self._interp if self._interp < 0: interp = -self.rng.integers(MAX_INTC) _joint_histogram(self._joint_hist, self._from_data.flat, # array iterator self._to_data, trans_vox_coords, interp) return self._similarity_call(self._joint_hist) def optimize(self, T, optimizer=OPTIMIZER, **kwargs): """ Optimize transform `T` with respect to similarity measure. The input object `T` will change as a result of the optimization. Parameters ---------- T : object or str An object representing a transformation that should implement ``apply`` method and ``param`` attribute or property. If a string, one of 'rigid', 'similarity', or 'affine'. The corresponding transformation class is then initialized by default. optimizer : str Name of optimization function (one of 'powell', 'steepest', 'cg', 'bfgs', 'simplex') **kwargs : dict keyword arguments to pass to optimizer Returns ------- T : object Locally optimal transformation """ # Replace T if a string is passed if T in affine_transforms: T = affine_transforms[T]() # Pull callback out of keyword arguments, if present callback = kwargs.pop('callback', None) # Create transform chain object with T generating params Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine) tc0 = Tv.param # Cost function to minimize def cost(tc): # This is where the similarity function is calculated Tv.param = tc return -self._eval(Tv) # Callback during optimization if callback is None and VERBOSE: def callback(tc): Tv.param = tc print(Tv.optimizable) print(str(self.similarity) + f' = {self._eval(Tv)}') print() # Switching to the appropriate optimizer if VERBOSE: print('Initial guess...') print(Tv.optimizable) kwargs.setdefault('xtol', XTOL) kwargs.setdefault('ftol', FTOL) kwargs.setdefault('gtol', GTOL) kwargs.setdefault('maxiter', MAXITER) kwargs.setdefault('maxfun', MAXFUN) fmin, args, kwargs = configure_optimizer(optimizer, fprime=None, fhess=None, **kwargs) # Output if VERBOSE: print(f'Optimizing using {fmin.__name__}') kwargs['callback'] = callback Tv.param = fmin(cost, tc0, *args, **kwargs) return Tv.optimizable def explore(self, T, *args): """ Evaluate the similarity at the transformations specified by sequences of parameter values. For instance: s, p = explore(T, (0, [-1,0,1]), (4, [-2.,2])) Parameters ---------- T : object Transformation around which the similarity function is to be evaluated. It is modified in place unless it has a ``copy`` method. args : tuple Each element of `args` is a sequence of two elements, where the first element specifies a transformation parameter axis and the second element gives the successive parameter values to evaluate along that axis. 
Returns ------- s : ndarray Array of similarity values p : ndarray Corresponding array of evaluated transformation parameters """ nparams = T.param.size if hasattr(T, 'copy'): T = T.copy() deltas = [[0] for i in range(nparams)] for a in args: deltas[a[0]] = a[1] grids = np.mgrid[[slice(0, len(d)) for d in deltas]] ntrials = np.prod(grids.shape[1:]) Deltas = [np.asarray(deltas[i])[grids[i, :]].ravel()\ for i in range(nparams)] simis = np.zeros(ntrials) params = np.zeros([nparams, ntrials]) Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine) param0 = Tv.param for i in range(ntrials): param = param0 + np.array([D[i] for D in Deltas]) Tv.param = param simis[i] = self._eval(Tv) params[:, i] = param return simis, params def _clamp(x, y, bins): # Threshold dmaxmax = 2 ** (8 * y.dtype.itemsize - 1) - 1 dmax = bins - 1 # default output maximum value if dmax > dmaxmax: raise ValueError('Excess number of bins') xmin = float(x.min()) xmax = float(x.max()) d = xmax - xmin """ If the image dynamic is small, no need for compression: just downshift image values and re-estimate the dynamic range (hence xmax is translated to xmax-tth casted to the appropriate dtype. Otherwise, compress after downshifting image values (values equal to the threshold are reset to zero). """ if issubclass(x.dtype.type, np.integer) and d <= dmax: y[:] = x - xmin bins = int(d) + 1 else: a = dmax / d y[:] = np.round(a * (x - xmin)) return y, bins def clamp(x, bins, mask=None): """ Clamp array values that fall within a given mask in the range [0..bins-1] and reset masked values to -1. Parameters ---------- x : ndarray The input array bins : number Desired number of bins mask : ndarray, tuple or slice Anything such that x[mask] is an array. Returns ------- y : ndarray Clamped array, masked items are assigned -1 bins : number Adjusted number of bins """ if bins > np.iinfo(np.short).max: raise ValueError('Too large a bin size') y = -np.ones(x.shape, dtype=CLAMP_DTYPE) if mask is None: y, bins = _clamp(x, y, bins) else: ym = y[mask] xm = x[mask] ym, bins = _clamp(xm, ym, bins) y[mask] = ym return y, bins def ideal_spacing(data, npoints): """ Tune spacing factors so that the number of voxels in the output block matches a given number. 
Parameters ---------- data : ndarray or sequence Data image to subsample npoints : number Target number of voxels (negative values will be ignored) Returns ------- spacing : ndarray Spacing factors """ dims = data.shape actual_npoints = (data >= 0).sum() spacing = np.ones(3, dtype='uint') while actual_npoints > npoints: # Subsample the direction with the highest number of samples ddims = dims / spacing if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]: dir = 0 elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]: dir = 1 else: dir = 2 spacing[dir] += 1 subdata = data[::spacing[0], ::spacing[1], ::spacing[2]] actual_npoints = (subdata >= 0).sum() return spacing def smallest_bounding_box(msk): """ Extract the smallest bounding box from a mask Parameters ---------- msk : ndarray Array of boolean Returns ------- corner : ndarray 3-dimensional coordinates of bounding box corner size : ndarray 3-dimensional size of bounding box """ x, y, z = np.where(msk > 0) corner = np.array([x.min(), y.min(), z.min()]) size = np.array([x.max() + 1, y.max() + 1, z.max() + 1]) - corner return corner, size def approx_gradient(f, x, epsilon): """ Approximate the gradient of a function using central finite differences Parameters ---------- f : callable The function to differentiate x : ndarray Point where the function gradient is to be evaluated epsilon : float Stepsize for finite differences Returns ------- g : ndarray Function gradient at `x` """ n = len(x) g = np.zeros(n) ei = np.zeros(n) for i in range(n): ei[i] = .5 * epsilon g[i] = (f(x + ei) - f(x - ei)) / epsilon ei[i] = 0 return g def approx_hessian_diag(f, x, epsilon): """ Approximate the Hessian diagonal of a function using central finite differences Parameters ---------- f : callable The function to differentiate x : ndarray Point where the Hessian is to be evaluated epsilon : float Stepsize for finite differences Returns ------- h : ndarray Diagonal of the Hessian at `x` """ n = len(x) h = np.zeros(n) ei = np.zeros(n) fx = f(x) for i in range(n): ei[i] = epsilon h[i] = (f(x + ei) + f(x - ei) - 2 * fx) / (epsilon ** 2) ei[i] = 0 return h def approx_hessian(f, x, epsilon): """ Approximate the full Hessian matrix of a function using central finite differences Parameters ---------- f : callable The function to differentiate x : ndarray Point where the Hessian is to be evaluated epsilon : float Stepsize for finite differences Returns ------- H : ndarray Hessian matrix at `x` """ n = len(x) H = np.zeros((n, n)) ei = np.zeros(n) for i in range(n): ei[i] = .5 * epsilon g1 = approx_gradient(f, x + ei, epsilon) g2 = approx_gradient(f, x - ei, epsilon) H[i, :] = (g1 - g2) / epsilon ei[i] = 0 return H def smooth_image(data, affine, sigma): """ Smooth an image by an isotropic Gaussian filter Parameters ---------- data : ndarray Image data array affine : ndarray Image affine transform sigma : float Filter standard deviation in mm Returns ------- sdata : ndarray Smoothed data array """ sigma_vox = sigma / np.sqrt(np.sum(affine[0:3, 0:3] ** 2, 0)) return nd.gaussian_filter(data, sigma_vox) nipy-0.6.1/nipy/algorithms/registration/joint_histogram.c000066400000000000000000000243261470056100100237040ustar00rootroot00000000000000#include "joint_histogram.h" #include "wichmann_prng.h" #include <math.h> #include <stdio.h> #include <string.h> #define SQR(a) ((a)*(a)) #define FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ?
(int)(a)-1 : (int)(a))) #define UROUND(a) ((int)(a+0.5)) #define ROUND(a)(FLOOR(a+0.5)) #ifdef _MSC_VER #define inline __inline #endif static inline void _pv_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); static inline void _tri_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); static inline void _rand_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); /* JOINT HISTOGRAM COMPUTATION. iterI : assumed to iterate over a signed short encoded, possibly non-contiguous array. imJ_padded : assumed C-contiguous (last index varies faster) & signed short encoded. H : assumed C-contiguous. Tvox : assumed C-contiguous: either a 3x4=12-sized array (or bigger) for an affine transformation or a 3xN array for a pre-computed transformation, with N equal to the size of the array corresponding to iterI (no checking done) Negative intensities are ignored. */ #define APPEND_NEIGHBOR(q, w) \ j = J[q]; \ if (j>=0) { \ *bufJnn = j; bufJnn ++; \ *bufW = w; bufW ++; \ nn ++; } int joint_histogram(PyArrayObject* JH, unsigned int clampI, unsigned int clampJ, PyArrayIterObject* iterI, const PyArrayObject* imJ_padded, const PyArrayObject* Tvox, long interp) { /* Since PyArray_DATA() and PyArray_DIMS() are simple accessors, it is OK to * cast away const as long as we treat the results as const. */ const signed short* J=PyArray_DATA((PyArrayObject*) imJ_padded); const npy_intp* dimJ = PyArray_DIMS((PyArrayObject*) imJ_padded); size_t dimJX=dimJ[0]-2; size_t dimJY=dimJ[1]-2; size_t dimJZ=dimJ[2]-2; signed short Jnn[8]; double W[8]; signed short *bufI, *bufJnn; double *bufW; signed short i, j; size_t off; size_t u2 = dimJ[2]; size_t u3 = u2+1; size_t u4 = dimJ[1]*u2; size_t u5 = u4+1; size_t u6 = u4+u2; size_t u7 = u6+1; double wx, wy, wz, wxwy, wxwz, wywz; double W0, W2, W3, W4; int nn, nx, ny, nz; double *H = PyArray_DATA(JH); double Tx, Ty, Tz; const double *tvox = PyArray_DATA((PyArrayObject*) Tvox); void (*interpolate)(unsigned int, double*, unsigned int, const signed short*, const double*, int, void*); void* interp_params = NULL; prng_state rng; /* Check assumptions regarding input arrays. If it fails, the function will return -1 without doing anything else. iterI : assumed to iterate over a signed short encoded, possibly non-contiguous array. imJ_padded : assumed C-contiguous (last index varies faster) & signed short encoded. H : assumed C-contiguous. 
Tvox : assumed C-contiguous: either a 3x4=12-sized array (or bigger) for an affine transformation or a 3xN array for a pre-computed transformation, with N equal to the size of the array corresponding to iterI (no checking done) */ if (PyArray_TYPE(iterI->ao) != NPY_SHORT) { fprintf(stderr, "Invalid type for the array iterator\n"); return -1; } if ( (!PyArray_ISCONTIGUOUS(imJ_padded)) || (!PyArray_ISCONTIGUOUS(JH)) || (!PyArray_ISCONTIGUOUS(Tvox)) ) { fprintf(stderr, "Some non-contiguous arrays\n"); return -1; } /* Reset the source image iterator */ PyArray_ITER_RESET(iterI); /* Set interpolation method */ if (interp==0) interpolate = &_pv_interpolation; else if (interp>0) interpolate = &_tri_interpolation; else { /* interp < 0 */ interpolate = &_rand_interpolation; prng_seed(-interp, &rng); interp_params = (void*)(&rng); } /* Re-initialize joint histogram */ memset((void*)H, 0, clampI*clampJ*sizeof(double)); /* Loop over source voxels */ while(iterI->index < iterI->size) { /* Source voxel intensity */ bufI = (signed short*)PyArray_ITER_DATA(iterI); i = bufI[0]; /* Compute the transformed grid coordinates of current voxel */ Tx = *tvox; tvox++; Ty = *tvox; tvox++; Tz = *tvox; tvox++; /* Test whether the current voxel is below the intensity threshold, or the transformed point is completely outside the reference grid */ if ((i>=0) && (Tx>-1) && (Tx<dimJX) && (Ty>-1) && (Ty<dimJY) && (Tz>-1) && (Tz<dimJZ)) { /* Neighboring voxels in the padded image (floor coordinates plus one, so that nnx <= Tx < nnx+1 maps to nx = nnx+1) */ nx = FLOOR(Tx) + 1; ny = FLOOR(Ty) + 1; nz = FLOOR(Tz) + 1; /*** Trilinear interpolation weights. Note: wx = nnx + 1 - Tx, where nnx is the location in the NON-PADDED grid */ wx = nx - Tx; wy = ny - Ty; wz = nz - Tz; wxwy = wx*wy; wxwz = wx*wz; wywz = wy*wz; /*** Prepare buffers */ bufJnn = Jnn; bufW = W; /*** Initialize neighbor list */ off = nx*u4 + ny*u2 + nz; nn = 0; /*** Neighbor 0: (0,0,0) */ W0 = wxwy*wz; APPEND_NEIGHBOR(off, W0); /*** Neighbor 1: (0,0,1) */ APPEND_NEIGHBOR(off+1, wxwy-W0); /*** Neighbor 2: (0,1,0) */ W2 = wxwz-W0; APPEND_NEIGHBOR(off+u2, W2); /*** Neighbor 3: (0,1,1) */ W3 = wx-wxwy-W2; APPEND_NEIGHBOR(off+u3, W3); /*** Neighbor 4: (1,0,0) */ W4 = wywz-W0; APPEND_NEIGHBOR(off+u4, W4); /*** Neighbor 5: (1,0,1) */ APPEND_NEIGHBOR(off+u5, wy-wxwy-W4); /*** Neighbor 6: (1,1,0) */ APPEND_NEIGHBOR(off+u6, wz-wxwz-W4); /*** Neighbor 7: (1,1,1) */ APPEND_NEIGHBOR(off+u7, 1-W3-wy-wz+wywz); /* Update the joint histogram using the desired interpolation technique */ interpolate(i, H, clampJ, Jnn, W, nn, interp_params); } /* End of IF TRANSFORMS INSIDE */ /* Update source index */ PyArray_ITER_NEXT(iterI); } /* End of loop over voxels */ return 0; } /* Partial Volume interpolation. See Maes et al, IEEE TMI, 2007. */ static inline void _pv_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params) { int k; unsigned int clampJ_i = clampJ*i; const signed short *bufJ = J; const double *bufW = W; for(k=0; k<nn; k++, bufJ++, bufW++) H[*bufJ+clampJ_i] += *bufW; return; } /* Trilinear interpolation: cast a unit vote at the weight-averaged neighbor intensity. */ static inline void _tri_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params) { int k; unsigned int clampJ_i = clampJ*i; const signed short *bufJ = J; const double *bufW = W; double sumW = 0.0, jm = 0.0; for(k=0; k<nn; k++, bufJ++, bufW++) { sumW += *bufW; jm += (*bufW)*(*bufJ); } if (sumW > 0.0) { jm /= sumW; H[UROUND(jm)+clampJ_i] += 1; } return; } /* Random interpolation. */ static inline void _rand_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params) { prng_state* rng = (prng_state*)params; int k; unsigned int clampJ_i = clampJ*i; const double *bufW; double sumW, draw; for(k=0, bufW=W, sumW=0.0; k<nn; k++, bufW++) sumW += *bufW; draw = sumW * prng_double(rng); /* uniform draw in [0, sumW); prng_double is the Wichmann-Hill generator from wichmann_prng.h */ for(k=0, bufW=W, sumW=0.0; k<nn; k++, bufW++) { sumW += *bufW; if (sumW > draw) break; } H[J[k]+clampJ_i] += 1; return; }
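/* Summary of the three update rules above (illustration): given the (at most 8) neighbor intensities J and trilinear weights W gathered in joint_histogram(), 'pv' spreads the fractional weights over row i of the histogram, 'tri' casts a single unit vote at the weight-averaged intensity, and 'rand' casts a unit vote at one neighbor drawn with probability proportional to its weight. */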
/* A function to compute the weighted median in a one-dimensional histogram. */ int L1_moments(double* n_, double* median_, double* dev_, const PyArrayObject* H) { int i, med; double median, dev, n, cpdf, lim; const double *buf; const double* h; unsigned int size; unsigned int offset; if (PyArray_TYPE(H) != NPY_DOUBLE) { fprintf(stderr, "Input array should be double\n"); return -1; } /* Initialize */ /* Since PyArray_DATA(), PyArray_DIMS(), and PyArray_STRIDE() are simple * accessors, it is OK to cast away const as long as we treat the results as * const (for those accessors returning pointer types). */ h = PyArray_DATA((PyArrayObject*) H); size = PyArray_DIM((PyArrayObject*) H, 0); offset = PyArray_STRIDE((PyArrayObject*) H, 0)/sizeof(double); n = median = dev = 0; cpdf = 0; buf = h; for (i=0; i<size; i++) { n += *buf; buf += offset; } /* Find the smallest index i such that cpdf >= n/2 */ if (n > 0) { lim = 0.5*n; i = 0; buf = h; cpdf = *buf; dev = 0; while (cpdf < lim) { i ++; buf += offset; cpdf += *buf; dev += - i*(*buf); } /* We then have: i-1 < med < i and choose i as the median (alternatively, an interpolation between i-1 and i could be performed by linearly approximating the cumulative function). The L1 deviation reads: sum*E(|X-med|) = - sum_{i<=med} i h(i) [1] + sum_{i>med} i h(i) [2] + med * [2*cpdf(med) - sum] [3] Term [1] is currently equal to `dev` variable. */ median = (double)i; dev += (2*cpdf - n)*median; med = i+1; /* Complete computation of the L1 deviation by computing the truncated mean [2] */ if (med < size) { buf = h + med*offset; for (i=med; i<size; i++) { dev += i*(*buf); buf += offset; } } } /* Store results */ *n_ = n; *median_ = median; *dev_ = dev; return 0; } nipy-0.6.1/nipy/algorithms/registration/joint_histogram.h000066400000000000000000000010671470056100100237120ustar00rootroot00000000000000#ifndef JOINT_HISTOGRAM #define JOINT_HISTOGRAM #ifdef __cplusplus extern "C" { #endif #include <Python.h> /* * Use extension numpy symbol table */ #define NO_IMPORT_ARRAY #include "_registration.h" #include <numpy/arrayobject.h> /* Update a pre-allocated joint histogram. Important notice: in all computations, H will be assumed C-contiguous. This means that it is contiguous and that, in C convention (row-major order, i.e. column indices are fastest): i (source intensities) are row indices j (target intensities) are column indices interp: 0 - PV interpolation 1 - TRILINEAR interpolation <0 - RANDOM interpolation with seed=-interp */ extern int joint_histogram(PyArrayObject* H, unsigned int clampI, unsigned int clampJ, PyArrayIterObject* iterI, const PyArrayObject* imJ_padded, const PyArrayObject* Tvox, long interp); extern int L1_moments(double* n_, double* median_, double* dev_, const PyArrayObject* H); #ifdef __cplusplus } #endif #endif nipy-0.6.1/nipy/algorithms/registration/meson.build000066400000000000000000000013621470056100100224750ustar00rootroot00000000000000target_dir = 'nipy/algorithms/registration' py.extension_module('_registration', [ cython_gen.process('_registration.pyx'), 'joint_histogram.c', 'wichmann_prng.c', 'cubic_spline.c', 'polyaffine.c' ], c_args: cython_c_args, include_directories: ['.', incdir_numpy], install: true, subdir: target_dir ) python_sources = [ '__init__.py', 'affine.py', 'chain_transform.py', 'groupwise_registration.py', 'histogram_registration.py', 'optimizer.py', 'polyaffine.py', 'resample.py', 'scripting.py', 'similarity_measures.py', 'transform.py', 'type_check.py' ] py.install_sources( python_sources, pure: false, subdir: target_dir ) install_subdir('tests', install_dir: install_root / target_dir) nipy-0.6.1/nipy/algorithms/registration/optimizer.py000066400000000000000000000025111470056100100227240ustar00rootroot00000000000000from scipy.optimize import fmin as fmin_simplex from scipy.optimize import fmin_bfgs, fmin_cg, fmin_ncg, fmin_powell from ..optimize import fmin_steepest def subdict(dic, keys): sdic = {} for k in keys: sdic[k] = dic[k] return sdic
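# Example (a sketch): configure_optimizer returns a scipy fmin-style callable # plus the positional and keyword arguments it understands, e.g. # fmin, args, kwargs = configure_optimizer('powell', xtol=0.01, ftol=0.01, # maxiter=25, maxfun=None) # gives fmin = scipy.optimize.fmin_powell with args == []. Note that 'xtol' # must always be supplied since it is read unconditionally in the function # below.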
def configure_optimizer(optimizer, fprime=None, fhess=None, **kwargs): """ Return the minimization function """ args = [] kwargs['fprime'] = fprime kwargs['fhess'] = fhess kwargs['avextol'] = kwargs['xtol'] if optimizer == 'simplex': keys = ('xtol', 'ftol', 'maxiter', 'maxfun') fmin = fmin_simplex elif optimizer == 'powell': keys = ('xtol', 'ftol', 'maxiter', 'maxfun') fmin = fmin_powell elif optimizer == 'cg': keys = ('gtol', 'maxiter', 'fprime') fmin = fmin_cg elif optimizer == 'bfgs': keys = ('gtol', 'maxiter', 'fprime') fmin = fmin_bfgs elif optimizer == 'ncg': args = [fprime] keys = ('avextol', 'maxiter', 'fhess') fmin = fmin_ncg elif optimizer == 'steepest': keys = ('xtol', 'ftol', 'maxiter', 'fprime') fmin = fmin_steepest else: raise ValueError(f'unknown optimizer: {optimizer}') return fmin, args, subdict(kwargs, keys) def use_derivatives(optimizer): return optimizer not in ('simplex', 'powell') nipy-0.6.1/nipy/algorithms/registration/polyaffine.c000066400000000000000000000053241470056100100226350ustar00rootroot00000000000000#include "polyaffine.h" #include <math.h> #include <string.h> #define TINY 1e-200 static double _gaussian(const double* xyz, const double* center, const double* sigma) { double aux, d2 = 0.0; int i; for (i=0; i<3; i++) { aux = xyz[i] - center[i]; aux /= sigma[i]; d2 += aux*aux; } return exp(-.5*d2); } /* Compute: y += w*x */ static void _add_weighted_affine(double* y, const double* x, double w) { int i; for (i=0; i<12; i++) y[i] += w*x[i]; return; } /* Compute: y = mat*x, then normalize by the accumulated weight W */ static void _apply_affine(double *y, const double* mat, const double* x, double W) { y[0] = mat[0]*x[0]+mat[1]*x[1]+mat[2]*x[2]+mat[3]; y[1] = mat[4]*x[0]+mat[5]*x[1]+mat[6]*x[2]+mat[7]; y[2] = mat[8]*x[0]+mat[9]*x[1]+mat[10]*x[2]+mat[11]; if (W < TINY) return; y[0] /= W; y[1] /= W; y[2] /= W; return; } void apply_polyaffine(PyArrayObject* XYZ, const PyArrayObject* Centers, const PyArrayObject* Affines, const PyArrayObject* Sigma) { double *xyz, *center, *affine, w, W; double t_xyz[3], mat[12]; size_t bytes_xyz = 3*sizeof(double), bytes_mat = 12*sizeof(double); int axis = 1; const double* sigma = PyArray_DATA((PyArrayObject*) Sigma); /* Iterate over the rows of the point and affine arrays */ PyArrayIterObject* iter_xyz = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)XYZ, &axis); PyArrayIterObject* iter_centers = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)Centers, &axis); PyArrayIterObject* iter_affines = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)Affines, &axis); /* Loop over input points */ while(iter_xyz->index < iter_xyz->size) { xyz = PyArray_ITER_DATA(iter_xyz); PyArray_ITER_RESET(iter_centers); PyArray_ITER_RESET(iter_affines); memset((void*)mat, 0, bytes_mat); W = 0.0; /* Loop over centers */ while(iter_centers->index < iter_centers->size) { center = PyArray_ITER_DATA(iter_centers); affine = PyArray_ITER_DATA(iter_affines); w = _gaussian(xyz, center, sigma); W += w; _add_weighted_affine(mat, affine, w); PyArray_ITER_NEXT(iter_centers); PyArray_ITER_NEXT(iter_affines); } /* Apply matrix */ _apply_affine(t_xyz, mat, xyz, W); memcpy((void*)xyz, (void*)t_xyz, bytes_xyz); /* Update xyz iterator */ PyArray_ITER_NEXT(iter_xyz); } /* Free memory */ Py_XDECREF(iter_xyz); Py_XDECREF(iter_centers); Py_XDECREF(iter_affines); return; } nipy-0.6.1/nipy/algorithms/registration/polyaffine.h000066400000000000000000000006631470056100100226430ustar00rootroot00000000000000#ifndef POLYAFFINE #define POLYAFFINE #ifdef __cplusplus extern "C" { #endif #include <Python.h> /* * Use extension numpy symbol table */ #define NO_IMPORT_ARRAY #include "_registration.h" #include <numpy/arrayobject.h> extern void apply_polyaffine(PyArrayObject* XYZ, const PyArrayObject* Centers, const PyArrayObject* Affines, const PyArrayObject* Sigma); #ifdef __cplusplus } #endif #endif nipy-0.6.1/nipy/algorithms/registration/polyaffine.py000066400000000000000000000072261470056100100230460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ._registration import _apply_polyaffine from .affine import apply_affine from .transform import Transform TINY_SIGMA = 1e-200 class PolyAffine(Transform):
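# Usage sketch (hypothetical values): # T = PolyAffine(centers, affines, sigma=10.) # centers: (N, 3) array # new_xyz = T.apply(xyz) # xyz: (M, 3) array of points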
def __init__(self, centers, affines, sigma, glob_affine=None): """ centers : N times 3 array We are given a set of affine transforms T_i with centers x_i, all in homogeneous coordinates. The polyaffine transform is defined, up to a right composition with a global affine, as: T(x) = sum_i w_i(x) T_i x where w_i(x) = g(x-x_i)/Z(x) are normalized Gaussian weights that sum up to one for every x. """ # Format input arguments self.centers = np.asarray(centers, dtype='double', order='C') self.sigma = np.zeros(3) self.sigma[:] = np.maximum(TINY_SIGMA, sigma) if hasattr(affines[0], 'as_affine'): affines = np.array([a.as_affine() for a in affines]) else: affines = np.asarray(affines) if hasattr(glob_affine, 'as_affine'): self.glob_affine = glob_affine.as_affine() else: self.glob_affine = glob_affine # Cache a (N, 12) matrix containing the affine coefficients, # should be C-contiguous double. self._affines = np.zeros((len(self.centers), 12)) self._affines[:] = np.reshape(affines[:, 0:3, :], (len(self.centers), 12)) def affine(self, i): aff = np.eye(4) aff[0:3, :] = self._affines[i].reshape(3, 4) return aff def affines(self): return [self.affine(i) for i in range(len(self.centers))] def apply(self, xyz): """ xyz is an (N, 3) array """ # txyz should be double C-contiguous for the cython # routine _apply_polyaffine if self.glob_affine is None: txyz = np.array(xyz, copy=True, dtype='double', order='C') else: txyz = apply_affine(self.glob_affine, xyz) _apply_polyaffine(txyz, self.centers, self._affines, self.sigma) return txyz def compose(self, other): """ Compose this transform onto another Parameters ---------- other : Transform transform that we compose onto Returns ------- composed_transform : Transform a transform implementing the composition of self on `other` """ # If other is not an Affine, use the generic compose method if not hasattr(other, 'as_affine'): return Transform(self.apply).compose(other) # Affine case: the result is a polyaffine transform with same # local affines if self.glob_affine is None: glob_affine = other.as_affine() else: glob_affine = np.dot(self.glob_affine, other.as_affine()) return self.__class__(self.centers, self.affines(), self.sigma, glob_affine=glob_affine) def left_compose(self, other): # If other is not an Affine, use the generic compose method if not hasattr(other, 'as_affine'): return Transform(other.apply).compose(self) # Affine case: the result is a polyaffine transform with same # global affine other_affine = other.as_affine() affines = [np.dot(other_affine, self.affine(i)) \ for i in range(len(self.centers))] return self.__class__(self.centers, affines, self.sigma, glob_affine=self.glob_affine) nipy-0.6.1/nipy/algorithms/registration/resample.py000066400000000000000000000136121470056100100225160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nibabel.casting import shared_range from scipy.ndimage import affine_transform, map_coordinates from ...core.image.image_spaces import as_xyz_image, make_xyz_image, xyz_affine from ._registration import _cspline_resample3d, _cspline_sample3d, _cspline_transform from .affine import Affine, inverse_affine INTERP_ORDER = 3 def cast_array(arr, dtype): """ arr : array Input array dtype : dtype Desired dtype """ if dtype.kind in 'iu': mn, mx = shared_range(arr.dtype, dtype) return np.clip(np.round(arr), mn, mx).astype(dtype) else: return arr.astype(dtype)
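# Example (a sketch; ``img_mov`` and ``img_ref`` are hypothetical images and # ``T`` a world-to-world transform, e.g. from HistogramRegistration.optimize): # aligned = resample(img_mov, transform=T, reference=img_ref) # With transform=None, the moving image is simply regridded onto the # reference.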
def resample(moving, transform=None, reference=None,
             mov_voxel_coords=False, ref_voxel_coords=False,
             dtype=None, interp_order=INTERP_ORDER,
             mode='constant', cval=0.):
    """ Resample `moving` image into voxel space of `reference` using
    `transform`

    Apply a transformation to the image considered as 'moving' to
    bring it into the same grid as a given `reference` image. The
    transformation usually maps world space in `reference` to world space in
    `moving`, but can also be a voxel to voxel mapping (see parameters
    below).

    This function uses scipy.ndimage except for the case `interp_order == 3`
    with constant mode and zero fill value, where a fast cubic spline
    implementation is used.

    Parameters
    ----------
    moving : nipy-like image
      Image to be resampled.
    transform : transform object or None
      Represents a transform that goes from the `reference` image to the
      `moving` image. None means an identity transform. Otherwise, it should
      have either an `apply` method or an `as_affine` method, or be a shape
      (4, 4) array. By default, `transform` maps between the output (world)
      space of `reference` and the output (world) space of `moving`. If
      `mov_voxel_coords` is True, maps to the *voxel* space of `moving` and
      if `ref_voxel_coords` is True, maps from the *voxel* space of
      `reference`.
    reference : None or nipy-like image or tuple, optional
      The reference image defines the image dimensions and xyz affine to
      which to resample. It can be input as a nipy-like image or as a tuple
      (shape, affine). If None, use `moving` to define these.
    mov_voxel_coords : boolean, optional
      True if the transform maps to voxel coordinates, False if it maps to
      world coordinates.
    ref_voxel_coords : boolean, optional
      True if the transform maps from voxel coordinates, False if it maps
      from world coordinates.
    dtype : dtype specifier, optional
      Data type of the output image; defaults to the dtype of the data array
      read from `moving`.
    interp_order : int, optional
      Spline interpolation order, defaults to 3.
    mode : str, optional
      Points outside the boundaries of the input are filled according to the
      given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is
      'constant'.
    cval : scalar, optional
      Value used for points outside the boundaries of the input if
      mode='constant'. Default is 0.0.

    Returns
    -------
    aligned_img : Image
      Image resliced to `reference` with reference-to-moving transform
      `transform`
    """
    # Function assumes xyz_affine for inputs
    moving = as_xyz_image(moving)
    mov_aff = xyz_affine(moving)
    if reference is None:
        reference = moving
    if isinstance(reference, (tuple, list)):
        ref_shape, ref_aff = reference
    else:
        # Expecting image.
Must be an image that can make an xyz_affine reference = as_xyz_image(reference) ref_shape = reference.shape ref_aff = xyz_affine(reference) if not len(ref_shape) == 3 or not ref_aff.shape == (4, 4): raise ValueError('Input image should be 3D') data = moving.get_fdata() if dtype is None: dtype = data.dtype # Assume identity transform by default if transform is None: transform = Affine() # Detect what kind of input transform affine = False if hasattr(transform, 'as_affine'): Tv = transform.as_affine() affine = True else: Tv = transform if hasattr(Tv, 'shape'): if Tv.shape == (4, 4): affine = True # Case: affine transform if affine: if not ref_voxel_coords: Tv = np.dot(Tv, ref_aff) if not mov_voxel_coords: Tv = np.dot(inverse_affine(mov_aff), Tv) if (interp_order, mode, cval) == (3, 'constant', 0): # we can use short cut output = np.zeros(ref_shape, dtype='double') output = cast_array(_cspline_resample3d(output, data, ref_shape, Tv), dtype) else: output = np.zeros(ref_shape, dtype=dtype) affine_transform(data, Tv[0:3, 0:3], offset=Tv[0:3, 3], order=interp_order, output_shape=ref_shape, output=output, mode=mode, cval=cval) # Case: non-affine transform else: if not ref_voxel_coords: Tv = Tv.compose(Affine(ref_aff)) if not mov_voxel_coords: Tv = Affine(inverse_affine(mov_aff)).compose(Tv) coords = np.indices(ref_shape).transpose((1, 2, 3, 0)) coords = np.reshape(coords, (np.prod(ref_shape), 3)) coords = Tv.apply(coords).T if (interp_order, mode, cval) == (3, 'constant', 0): # we can use short cut cbspline = _cspline_transform(data) output = np.zeros(ref_shape, dtype='double') output = cast_array(_cspline_sample3d(output, cbspline, *coords), dtype) else: # No short-cut, use map_coordinates output = map_coordinates(data, coords, order=interp_order, output=dtype, mode=mode, cval=cval) output.shape = ref_shape return make_xyz_image(output, ref_aff, 'scanner') nipy-0.6.1/nipy/algorithms/registration/scripting.py000066400000000000000000000145661470056100100227210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ A scripting wrapper around 4D registration (SpaceTimeRealign) """ import os import os.path as op import nibabel as nib import nibabel.eulerangles as euler import numpy as np import numpy.linalg as npl from nibabel.filename_parser import splitext_addext from nibabel.optpkg import optional_package matplotlib, HAVE_MPL, _ = optional_package('matplotlib') import nipy.algorithms.slicetiming as st from nipy.io.api import save_image from .groupwise_registration import SpaceTimeRealign timefuncs = st.timefuncs.SLICETIME_FUNCTIONS __all__ = ["space_time_realign", "aff2euler"] def aff2euler(affine): """ Compute Euler angles from 4 x 4 `affine` Parameters ---------- affine : 4 by 4 array An affine transformation matrix Returns ------- The Euler angles associated with the affine """ return euler.mat2euler(aff2rot_zooms(affine)[0]) def aff2rot_zooms(affine): """ Compute a rotation matrix and zooms from 4 x 4 `affine` Parameters ---------- affine : 4 by 4 array An affine transformation matrix Returns ------- R: 3 by 3 array A rotation matrix in 3D zooms: length 3 1-d array Vector with voxel sizes. """ RZS = affine[:3, :3] zooms = np.sqrt(np.sum(RZS * RZS, axis=0)) RS = RZS / zooms # Adjust zooms to make RS correspond (below) to a true # rotation matrix. if npl.det(RS) < 0: zooms[0] *= -1 RS[:,0] *= -1 # retrieve rotation matrix from RS with polar decomposition. 
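    # (With the SVD RS = P @ diag(S) @ Qs, the polar rotation factor is
    # P @ Qs -- the rotation closest to RS in the Frobenius norm; diag(S)
    # carries the discarded scales/shears.)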
    # Discard shears
    P, S, Qs = npl.svd(RS)
    R = np.dot(P, Qs)
    return R, zooms


def space_time_realign(input, tr, slice_order='descending', slice_dim=2,
                       slice_dir=1, apply=True, make_figure=False,
                       out_name=None):
    """ This is a scripting interface to
    `nipy.algorithms.registration.SpaceTimeRealign`

    Parameters
    ----------
    input : str or list
        A full path to a file-name (4D nifti time-series), or to a directory
        containing 4D nifti time-series, or a list of full-paths to files.
    tr : float
        The repetition time
    slice_order : str (optional)
        This is the order of slice-times in the acquisition. This is used as
        a key into the ``SLICETIME_FUNCTIONS`` dictionary from
        :mod:`nipy.algorithms.slicetiming.timefuncs`. Default: 'descending'.
    slice_dim : int (optional)
        Denotes the axis in `images` that is the slice axis. In a 4D image,
        this will often be axis = 2 (default).
    slice_dir : int (optional)
        1 if the slices were acquired slice 0 first (default), slice -1
        last, or -1 if slice -1 was acquired first, slice 0 last.
    apply : bool (optional)
        Whether to apply the transformation and produce an output. Default:
        True.
    make_figure : bool (optional)
        Whether to generate a .png figure with the parameters across scans.
    out_name : str (optional)
        Specify an output location (full path) for the files that are
        generated. Default: generate files in the path of the inputs (with
        an `_mc` suffix added to the file-names).

    Returns
    -------
    transforms : ndarray
        An (n_time_points,) shaped array containing
        `nipy.algorithms.registration.affine.Rigid` class instances for each
        time point in the time-series. These can be used as affine
        transforms by referring to their `.as_affine` attribute.
    """
    if make_figure:
        if not HAVE_MPL:
            e_s = "You need to have matplotlib installed to run this function"
            e_s += " with `make_figure` set to `True`"
            raise RuntimeError(e_s)

    # If we got only a single file, we motion correct that one:
    if op.isfile(input):
        if not input.endswith(('.nii', '.nii.gz')):
            e_s = "Input needs to be a nifti file ('.nii' or '.nii.gz')"
            raise ValueError(e_s)
        fnames = [input]
        input = nib.load(input)
    # If this is a full-path to a directory containing files, it's still a
    # string:
    elif isinstance(input, str):
        list_of_files = os.listdir(input)
        fnames = [op.join(input, f) for f in np.sort(list_of_files)
                  if f.endswith(('.nii', '.nii.gz'))]
        input = [nib.load(x) for x in fnames]
    # Assume that it's a list of full-paths to files:
    else:
        fnames = list(input)
        input = [nib.load(x) for x in input]

    slice_times = timefuncs[slice_order]
    slice_info = [slice_dim, slice_dir]
    reggy = SpaceTimeRealign(input, tr, slice_times, slice_info)
    reggy.estimate(align_runs=True)

    # We now have the transformation parameters in here:
    transforms = np.squeeze(np.array(reggy._transforms))
    rot = np.array([t.rotation for t in transforms])
    trans = np.array([t.translation for t in transforms])

    if apply:
        new_reggy = reggy.resample(align_runs=True)
        for run_idx, new_im in enumerate(new_reggy):
            # Fix output TR - it was probably lost in the image realign step
            assert new_im.affine.shape == (5, 5)
            new_im.affine[:] = new_im.affine.dot(np.diag([1, 1, 1, tr, 1]))
            # Save it out to a '.nii.gz' file:
            froot, ext, trail_ext = splitext_addext(fnames[run_idx])
            path, fname = op.split(froot)
            # We retain the file-name adding '_mc' regardless of where it's
            # saved
            new_path = path if out_name is None else out_name
            save_image(new_im, op.join(new_path, fname + '_mc.nii.gz'))

    if make_figure:
        # Delay MPL plotting import to latest moment to avoid errors trying
        # to import the default MPL backend (such as tkinter, which may not
        # be installed). See: https://github.com/nipy/nipy/issues/414
        import matplotlib.pyplot as plt
        figure, ax = plt.subplots(2)
        figure.set_size_inches([8, 6])
        ax[0].plot(rot)
        ax[0].set_xlabel('Time (TR)')
        ax[0].set_ylabel('Rotation (radians)')
        ax[1].plot(trans)
        ax[1].set_xlabel('Time (TR)')
        ax[1].set_ylabel('Translation (mm)')
        figure.savefig(op.join(os.path.split(fnames[0])[0], 'mc_params.png'))

    return transforms
nipy-0.6.1/nipy/algorithms/registration/similarity_measures.py
import numpy as np
from scipy.ndimage import gaussian_filter

from ._registration import _L1_moments

TINY = float(np.finfo(np.double).tiny)
SIGMA_FACTOR = 0.05

# A lambda function to force values to be positive (at least TINY)
nonzero = lambda x: np.maximum(x, TINY)


def correlation2loglikelihood(rho2, npts):
    """ Re-normalize correlation.

    Convert a squared normalized correlation to a proper log-likelihood
    associated with a registration problem. The result is a function of
    both the input correlation and the number of points in the image
    overlap. See: Roche, medical image registration through statistical
    inference, 2001.

    Parameters
    ----------
    rho2 : float
      Squared correlation measure
    npts : int
      Number of points involved in computing `rho2`

    Returns
    -------
    ll : float
      Log-likelihood corresponding to the re-normalized `rho2`
    """
    return -.5 * npts * np.log(nonzero(1 - rho2))


def dist2loss(q, qI=None, qJ=None):
    """ Convert a joint distribution model q(i,j) into a pointwise loss:

    L(i,j) = - log( q(i,j) / (q(i)q(j)) )

    where q(i) = sum_j q(i,j) and q(j) = sum_i q(i,j)

    See: Roche, medical image registration through statistical inference,
    2001.
    """
    qT = q.T
    if qI is None:
        qI = q.sum(0)
    if qJ is None:
        qJ = q.sum(1)
    q /= nonzero(qI)
    qT /= nonzero(qJ)
    return -np.log(nonzero(q))


class SimilarityMeasure:
    """ Template class """
    def __init__(self, shape, renormalize=False, dist=None):
        self.shape = shape
        self.J, self.I = np.indices(shape)
        self.renormalize = renormalize
        if dist is None:
            self.dist = None
        else:
            self.dist = dist.copy()

    def loss(self, H):
        return np.zeros(H.shape)

    def npoints(self, H):
        return H.sum()

    def __call__(self, H):
        total_loss = np.sum(H * self.loss(H))
        if not self.renormalize:
            total_loss /= nonzero(self.npoints(H))
        return -total_loss


class SupervisedLikelihoodRatio(SimilarityMeasure):
    """ Assume a joint intensity distribution model is given by self.dist """
    def loss(self, H):
        if not hasattr(self, 'L'):
            if self.dist is None:
                raise ValueError('SupervisedLikelihoodRatio: dist attribute cannot be None')
            if not self.dist.shape == H.shape:
                raise ValueError('SupervisedLikelihoodRatio: wrong shape for dist attribute')
            self.L = dist2loss(self.dist)
        return self.L


class MutualInformation(SimilarityMeasure):
    """ Use the normalized joint histogram as a distribution model """
    def loss(self, H):
        return dist2loss(H / nonzero(self.npoints(H)))


class ParzenMutualInformation(SimilarityMeasure):
    """ Use Parzen windowing to estimate the distribution model """
    def loss(self, H):
        if not hasattr(self, 'sigma'):
            self.sigma = SIGMA_FACTOR * np.array(H.shape)
        npts = nonzero(self.npoints(H))
        Hs = H / npts
        gaussian_filter(Hs, sigma=self.sigma, mode='constant', output=Hs)
        return dist2loss(Hs)


class DiscreteParzenMutualInformation(SimilarityMeasure):
    """ Use Parzen windowing in the discrete case to estimate the
    distribution model """
    def loss(self, H):
        if not hasattr(self, 'sigma'):
            self.sigma = SIGMA_FACTOR * np.array(H.shape)
        Hs = gaussian_filter(H, sigma=self.sigma, mode='constant')
        Hs /= nonzero(Hs.sum())
        return dist2loss(Hs)
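# Note the ordering difference above: ParzenMutualInformation normalizes the
# joint histogram and then smooths it, whereas
# DiscreteParzenMutualInformation smooths first and renormalizes afterwards,
# so the two resulting losses generally differ.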
class NormalizedMutualInformation(SimilarityMeasure):
    """
    NMI = 2*(1 - H(I,J)/[H(I)+H(J)])
        = 2*MI/[H(I)+H(J)]
    """
    def __call__(self, H):
        H = H / nonzero(self.npoints(H))
        hI = H.sum(0)
        hJ = H.sum(1)
        entIJ = -np.sum(H * np.log(nonzero(H)))
        entI = -np.sum(hI * np.log(nonzero(hI)))
        entJ = -np.sum(hJ * np.log(nonzero(hJ)))
        return 2 * (1 - entIJ / nonzero(entI + entJ))


class CorrelationCoefficient(SimilarityMeasure):
    """ Use a bivariate Gaussian as a distribution model """
    def loss(self, H):
        rho2 = self(H)
        I = (self.I - self.mI) / np.sqrt(nonzero(self.vI))
        J = (self.J - self.mJ) / np.sqrt(nonzero(self.vJ))
        L = rho2 * I ** 2 + rho2 * J ** 2 - 2 * self.rho * I * J
        tmp = nonzero(1. - rho2)
        L *= .5 / tmp
        L += .5 * np.log(tmp)
        return L

    def __call__(self, H):
        npts = nonzero(self.npoints(H))
        # Store the moments as attributes: the loss method above reads
        # them back.
        self.mI = np.sum(H * self.I) / npts
        self.mJ = np.sum(H * self.J) / npts
        self.vI = np.sum(H * (self.I) ** 2) / npts - self.mI ** 2
        self.vJ = np.sum(H * (self.J) ** 2) / npts - self.mJ ** 2
        self.cIJ = np.sum(H * self.J * self.I) / npts - self.mI * self.mJ
        self.rho = self.cIJ / nonzero(np.sqrt(self.vI * self.vJ))
        rho2 = self.rho ** 2
        if self.renormalize:
            rho2 = correlation2loglikelihood(rho2, npts)
        return rho2


class CorrelationRatio(SimilarityMeasure):
    """ Use a nonlinear regression model with Gaussian errors as a
    distribution model """
    def __call__(self, H):
        npts_J = np.sum(H, 1)
        tmp = nonzero(npts_J)
        mI_J = np.sum(H * self.I, 1) / tmp
        vI_J = np.sum(H * (self.I) ** 2, 1) / tmp - mI_J ** 2
        npts = np.sum(npts_J)
        tmp = nonzero(npts)
        hI = np.sum(H, 0)
        hJ = np.sum(H, 1)
        mI = np.sum(hI * self.I[0, :]) / tmp
        vI = np.sum(hI * self.I[0, :] ** 2) / tmp - mI ** 2
        mean_vI_J = np.sum(hJ * vI_J) / tmp
        eta2 = 1. - mean_vI_J / nonzero(vI)
        if self.renormalize:
            eta2 = correlation2loglikelihood(eta2, npts)
        return eta2


class CorrelationRatioL1(SimilarityMeasure):
    """ Use a nonlinear regression model with Laplace distributed errors
    as a distribution model """
    def __call__(self, H):
        moments = np.array([_L1_moments(H[j, :]) for j in range(H.shape[0])])
        npts_J, mI_J, sI_J = moments[:, 0], moments[:, 1], moments[:, 2]
        hI = np.sum(H, 0)
        hJ = np.sum(H, 1)
        npts, mI, sI = _L1_moments(hI)
        mean_sI_J = np.sum(hJ * sI_J) / nonzero(npts)
        eta2 = 1.
- mean_sI_J / nonzero(sI) if self.renormalize: eta2 = correlation2loglikelihood(eta2, npts) return eta2 similarity_measures = { 'slr': SupervisedLikelihoodRatio, 'mi': MutualInformation, 'nmi': NormalizedMutualInformation, 'pmi': ParzenMutualInformation, 'dpmi': DiscreteParzenMutualInformation, 'cc': CorrelationCoefficient, 'cr': CorrelationRatio, 'crl1': CorrelationRatioL1} nipy-0.6.1/nipy/algorithms/registration/tests/000077500000000000000000000000001470056100100214735ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/registration/tests/__init__.py000066400000000000000000000000501470056100100235770ustar00rootroot00000000000000# Init to make test directory a package nipy-0.6.1/nipy/algorithms/registration/tests/test_affine.py000066400000000000000000000126351470056100100243430ustar00rootroot00000000000000 import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from ....testing import assert_almost_equal from ..affine import ( Affine, Affine2D, Rigid, Rigid2D, Similarity, Similarity2D, rotation_mat2vec, slices2aff, subgrid_affine, ) def random_vec12(subtype='affine'): v = np.array([0,0,0,0.0,0,0,1,1,1,0,0,0]) v[0:3] = 20*np.random.rand(3) v[3:6] = np.random.rand(3) if subtype == 'similarity': v[6:9] = np.random.rand() elif subtype == 'affine': v[6:9] = np.random.rand(3) v[9:12] = np.random.rand(3) return v """ def test_rigid_compose(): T1 = Affine(random_vec12('rigid')) T2 = Affine(random_vec12('rigid')) T = T1*T2 assert_almost_equal(T.as_affine(), np.dot(T1.as_affine(), T2.as_affine())) def test_compose(): T1 = Affine(random_vec12('affine')) T2 = Affine(random_vec12('similarity')) T = T1*T2 assert_almost_equal(T.as_affine(), np.dot(T1.as_affine(), T2.as_affine())) """ def test_mat2vec(): mat = np.eye(4) tmp = np.random.rand(3,3) U, s, Vt = np.linalg.svd(tmp) U /= np.linalg.det(U) Vt /= np.linalg.det(Vt) mat[0:3,0:3] = np.dot(np.dot(U, np.diag(s)), Vt) T = Affine(mat) assert_almost_equal(T.as_affine(), mat) def test_rotation_mat2vec(): r = rotation_mat2vec(np.diag([-1,1,-1])) assert not np.isnan(r).max() def test_composed_affines(): aff1 = np.diag([2, 3, 4, 1]) aff2 = np.eye(4) aff2[:3,3] = (10, 11, 12) comped = np.dot(aff2, aff1) comped_obj = Affine(comped) assert_array_almost_equal(comped_obj.as_affine(), comped) aff1_obj = Affine(aff1) aff2_obj = Affine(aff2) re_comped = aff2_obj.compose(aff1_obj) assert_array_almost_equal(re_comped.as_affine(), comped) # Crazy, crazy, crazy aff1_remixed = aff1_obj.as_affine() aff2_remixed = aff2_obj.as_affine() comped_remixed = np.dot(aff2_remixed, aff1_remixed) assert_array_almost_equal(comped_remixed, Affine(comped_remixed).as_affine()) def test_affine_types(): pts = np.random.normal(size=(10,3)) for klass, n_params in ((Affine, 12), (Affine2D, 6), (Rigid, 6), (Rigid2D, 3), (Similarity, 7), (Similarity2D, 4), ): obj = klass() assert_array_equal(obj.param, np.zeros((n_params,))) obj.param = np.ones((n_params,)) assert_array_equal(obj.param, np.ones((n_params,))) # Check that round trip works orig_aff = obj.as_affine() obj2 = klass(orig_aff) assert_array_almost_equal(obj2.as_affine(), orig_aff) # Check inverse inv_obj = obj.inv() # Check points transform and invert pts_dash = obj.apply(pts) assert_array_almost_equal(pts, inv_obj.apply(pts_dash)) # Check composition with inverse gives identity with_inv = inv_obj.compose(obj) assert_array_almost_equal(with_inv.as_affine(), np.eye(4)) # Just check that str works without error s = str(obj) # Check default parameter input obj = klass(np.zeros((12,))) 
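        # Every subclass accepts a full 12-element vector (or list) here and
        # exposes only its own n_params parameters, as the asserts below
        # check.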
assert_array_equal(obj.param, np.zeros((n_params,))) obj = klass(list(np.zeros((12,)))) assert_array_equal(obj.param, np.zeros((n_params,))) def test_indirect_affines(): T = np.eye(4) A = np.random.rand(3,3) if np.linalg.det(A) > 0: A = -A T[:3,:3] = A obj = Affine(T) assert not obj.is_direct assert_array_almost_equal(T, obj.as_affine()) def test_slices2aff(): # Take a series of slices, return equivalent affine for N in range(1, 5): slices = [slice(None) for n in range(N)] aff = np.eye(N+1) assert_array_equal(slices2aff(slices), aff) slices = [slice(2) for n in range(N)] assert_array_equal(slices2aff(slices), aff) slices = [slice(2, 4) for n in range(N)] aff2 = aff.copy() aff2[:-1,-1] = [2] * N assert_array_equal(slices2aff(slices), aff2) slices = [slice(2, 4, 5) for n in range(N)] aff3 = np.diag([5] * N + [1]) aff3[:-1,-1] = [2] * N assert_array_equal(slices2aff(slices), aff3) slices = [slice(2.1, 11, 4.9), slice(3.2, 11, 5.8), slice(4.3, 11, 6.7)] assert_array_equal(slices2aff(slices), [[4.9, 0, 0, 2.1], [0, 5.8, 0, 3.2], [0, 0, 6.7, 4.3], [0, 0, 0, 1]]) def test_subgrid_affine(): # Takes an affine and a series of slices, creates affine from slices, # returns dot(affine, affine_from_slices) slices = [slice(2, 11, 4), slice(3, 11, 5), slice(4, 11, 6)] assert_array_equal(subgrid_affine(np.eye(4), slices), [[4, 0, 0, 2], [0, 5, 0, 3], [0, 0, 6, 4], [0, 0, 0, 1]]) assert_array_equal(subgrid_affine(np.diag([2, 3, 4, 1]), slices), [[8, 0, 0, 4], [0, 15, 0, 9], [0, 0, 24, 16], [0, 0, 0, 1]]) # Raises error for non-integer slice arguments slices[0] = slice(2.1, 11, 4) pytest.raises(ValueError, subgrid_affine, np.eye(4), slices) nipy-0.6.1/nipy/algorithms/registration/tests/test_chain_transforms.py000066400000000000000000000103601470056100100264440ustar00rootroot00000000000000""" Testing combined transformation objects The combined transform object associates a spatial transformation with the parameters of that transformation, for use in an optimizer. The combined transform object does several things. 
First, it can transform a coordinate array with:: transformed_pts = obj.apply(pts) Second, the transform can phrase itself as a vector of parameters that are suitable for optimization:: vec = obj.get_params() Third, the transform can be modified by setting from the optimization parameters:: obj.set_params(new_vec) new_transformed_pts = obj.apply(pts) """ import numpy as np import numpy.linalg as npl from nibabel.affines import apply_affine from numpy.testing import assert_array_almost_equal, assert_array_equal from ..affine import Affine from ..chain_transform import ChainTransform AFF1 = np.diag([2, 3, 4, 1]) AFF2 = np.eye(4) AFF2[:3,3] = (10, 11, 12) # generate a random affine with a positive determinant AFF3 = np.eye(4) AFF3[:3,3] = np.random.normal(size=(3,)) tmp = np.random.normal(size=(3,3)) AFF3[:3,:3] = np.sign(npl.det(tmp))*tmp POINTS = np.arange(12).reshape(4,3) # Make affine objects AFF1_OBJ, AFF2_OBJ, AFF3_OBJ = (Affine(a) for a in [AFF1, AFF2.copy(), AFF3]) def test_creation(): # This is the simplest possible example, where there is a thing we are # optimizing, and an optional pre and post transform # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) ct = ChainTransform(aff2_obj) # Check apply gives expected result assert_array_equal(ct.apply(POINTS), apply_affine(AFF2, POINTS)) # Check that result is changed by setting params assert_array_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), POINTS) # Does changing params in chain object change components passed in? assert_array_almost_equal(aff2_obj.param, np.zeros((12,))) # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) # Check apply gives the expected results ct = ChainTransform(aff2_obj, pre=AFF1_OBJ) assert_array_almost_equal(AFF1_OBJ.as_affine(), AFF1) assert_array_almost_equal(aff2_obj.as_affine(), AFF2) tmp = np.dot(AFF2, AFF1) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF2, AFF1), POINTS)) # Check that result is changed by setting params assert_array_almost_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), apply_affine(AFF1, POINTS)) # Does changing params in chain object change components passed in? assert_array_almost_equal(aff2_obj.param, np.zeros((12,))) # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) ct = ChainTransform(aff2_obj, pre=AFF1_OBJ, post=AFF3_OBJ) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF3, np.dot(AFF2, AFF1)), POINTS)) # Check that result is changed by setting params assert_array_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF3, AFF1), POINTS)) # Does changing params in chain object change components passed in? assert_array_equal(aff2_obj.param, np.zeros((12,))) # disabling this test because ChainTransform now returns an error if # it doesn't get an optimizable transform. 
""" def test_inputs(): # Check that we can pass arrays or None as pre and post assert_array_almost_equal(ChainTransform(AFF2).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=AFF1).apply(POINTS), ChainTransform(AFF2_OBJ, pre=AFF1_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=AFF1, post=AFF3).apply(POINTS), ChainTransform(AFF2_OBJ, pre=AFF1_OBJ, post=AFF3_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=None).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=None, post=None).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) """ nipy-0.6.1/nipy/algorithms/registration/tests/test_cubic_spline.py000066400000000000000000000014751470056100100255520ustar00rootroot00000000000000""" Testing """ import numpy as np from numpy.testing import assert_array_almost_equal from .._registration import _cspline_sample1d, _cspline_sample4d, _cspline_transform def test_sample1d(): a = np.random.rand(100) c = _cspline_transform(a) x = np.arange(100) b = np.zeros(100) b = _cspline_sample1d(b, c, x) assert_array_almost_equal(a, b) b = _cspline_sample1d(b, c, x, mode='nearest') assert_array_almost_equal(a, b) def test_sample4d(): a = np.random.rand(4, 5, 6, 7) c = _cspline_transform(a) x = np.mgrid[0:4, 0:5, 0:6, 0:7] b = np.zeros(a.shape) args = list(x) b = _cspline_sample4d(b, c, *args) assert_array_almost_equal(a, b) args = list(x) + ['nearest' for i in range(4)] b = _cspline_sample4d(b, c, *args) assert_array_almost_equal(a, b) nipy-0.6.1/nipy/algorithms/registration/tests/test_fmri_realign4d.py000066400000000000000000000273541470056100100260050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import nibabel as nib import numpy as np import pytest from nibabel import io_orientation from numpy.testing import assert_array_almost_equal, assert_array_equal from .... import load_image from ....core.image.image_spaces import make_xyz_image, xyz_affine from ....io.nibcompat import get_header from ....testing import funcfile from ...slicetiming.timefuncs import st_02413, st_42031, st_43210 from ..affine import Rigid from ..groupwise_registration import ( FmriRealign4d, Image4d, Realign4d, Realign4dAlgorithm, SpaceRealign, SpaceTimeRealign, make_grid, resample4d, ) IM = load_image(funcfile) IMS = [nib.Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4)) for i in range(4)] for ix, imx in enumerate(IMS): get_header(imx)['pixdim'][4] = ix def test_futurewarning(): with warnings.catch_warnings(record=True) as warns: warnings.simplefilter('always') FmriRealign4d([IM], tr=2., slice_order='ascending') assert warns.pop(0).category == FutureWarning def test_scanner_time(): im4d = Image4d(IM.get_fdata(), IM.affine, tr=3., slice_times=(0, 1, 2)) assert im4d.scanner_time(0, 0) == 0. assert im4d.scanner_time(0, im4d.tr) == 1. 
def test_slice_info(): im4d = Image4d(IM.get_fdata(), IM.affine, tr=3., slice_times=(0, 1, 2), slice_info=(2, -1)) assert im4d.slice_axis == 2 assert im4d.slice_direction == -1 def test_slice_timing(): affine = np.eye(4) affine[0:3, 0:3] = IM.affine[0:3, 0:3] im4d = Image4d(IM.get_fdata(), affine, tr=2., slice_times=0.0) x = resample4d(im4d, [Rigid() for i in range(IM.shape[3])]) assert_array_almost_equal(im4d.get_fdata(), x) def test_realign4d_no_time_interp(): runs = [IM, IM] R = FmriRealign4d(runs, time_interp=False) assert R.slice_times == 0 def test_realign4d_ascending(): runs = [IM, IM] R = FmriRealign4d(runs, tr=3, slice_order='ascending') assert_array_equal(R.slice_times, (0, 1, 2)) assert R.tr == 3 def test_realign4d_descending(): runs = [IM, IM] R = FmriRealign4d(runs, tr=3, slice_order='descending') assert_array_equal(R.slice_times, (2, 1, 0)) assert R.tr == 3 def test_realign4d_ascending_interleaved(): runs = [IM, IM] R = FmriRealign4d(runs, tr=3, slice_order='ascending', interleaved=True) assert_array_equal(R.slice_times, (0, 2, 1)) assert R.tr == 3 def test_realign4d_descending_interleaved(): runs = [IM, IM] R = FmriRealign4d(runs, tr=3, slice_order='descending', interleaved=True) assert_array_equal(R.slice_times, (1, 2, 0)) assert R.tr == 3 def wrong_call(slice_times=None, slice_order=None, tr_slices=None, interleaved=None, time_interp=None): runs = [IM, IM] return FmriRealign4d(runs, tr=3, slice_times=slice_times, slice_order=slice_order, tr_slices=tr_slices, interleaved=interleaved, time_interp=time_interp) def test_realign4d_incompatible_args(): pytest.raises(ValueError, wrong_call, slice_order=(0, 1, 2), interleaved=False) pytest.raises(ValueError, wrong_call, slice_times=(0, 1, 2), slice_order='ascending') pytest.raises(ValueError, wrong_call, slice_times=(0, 1, 2), slice_order=(0, 1, 2)) pytest.raises(ValueError, wrong_call, slice_times=(0, 1, 2), time_interp=True) pytest.raises(ValueError, wrong_call, slice_times=(0, 1, 2), time_interp=False) pytest.raises(ValueError, wrong_call, time_interp=True) pytest.raises(ValueError, wrong_call, slice_times=(0, 1, 2), tr_slices=1) def test_realign4d(): """ This tests whether realign4d yields the same results depending on whether the slice order is input explicitly or as slice_times='ascending'. Due to the very small size of the image used for testing (only 3 slices), optimization is numerically unstable. It seems to make the default optimizer, namely scipy.fmin.fmin_ncg, adopt a random behavior. To work around the resulting inconsistency in results, we use nipy.optimize.fmin_steepest as the optimizer, although it's generally not recommended in practice. """ runs = [IM, IM] orient = io_orientation(IM.affine) slice_axis = int(np.where(orient[:, 0] == 2)[0]) R1 = SpaceTimeRealign(runs, tr=2., slice_times='ascending', slice_info=slice_axis) R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') nslices = IM.shape[slice_axis] slice_times = (2. 
/ float(nslices)) * np.arange(nslices) R2 = SpaceTimeRealign(runs, tr=2., slice_times=slice_times, slice_info=slice_axis) R2.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') for r in range(2): for i in range(IM.shape[3]): assert_array_almost_equal(R1._transforms[r][i].translation, R2._transforms[r][i].translation) assert_array_almost_equal(R1._transforms[r][i].rotation, R2._transforms[r][i].rotation) for i in range(IM.shape[3]): assert_array_almost_equal(R1._mean_transforms[r].translation, R2._mean_transforms[r].translation) assert_array_almost_equal(R1._mean_transforms[r].rotation, R2._mean_transforms[r].rotation) def test_realign4d_runs_with_different_affines(): aff = xyz_affine(IM) aff2 = aff.copy() aff2[0:3, 3] += 5 im2 = make_xyz_image(IM.get_fdata(), aff2, 'scanner') runs = [IM, im2] R = SpaceTimeRealign(runs, tr=2., slice_times='ascending', slice_info=2) R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') cor_im, cor_im2 = R.resample() assert_array_equal(xyz_affine(cor_im2), aff) def test_realign4d_params(): # Some tests for input parameters to realign4d R = Realign4d(IM, 3, [0, 1, 2], None) # No slice_info - OK assert R.tr == 3 # TR cannot be None pytest.raises(ValueError, Realign4d, IMS[1], None, [0, 1, 2], None) # TR cannot be zero pytest.raises(ValueError, Realign4d, IMS[1], 0, [0, 1, 2], None) # TR can be None if slice times are None R = Realign4d(IM, None, None) assert R.tr == 1 def test_spacetimerealign_params(): runs = [IM, IM] for slice_times in ('descending', '43210', st_43210, [2, 1, 0]): R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2) assert_array_equal(R.slice_times, (2, 1, 0)) assert R.tr == 3 for slice_times in ('asc_alt_2', '02413', st_02413, [0, 2, 1]): R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2) assert_array_equal(R.slice_times, (0, 2, 1)) assert R.tr == 3 for slice_times in ('desc_alt_2', '42031', st_42031, [1, 2, 0]): R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2) assert_array_equal(R.slice_times, (1, 2, 0)) assert R.tr == 3 # Check changing axis R = SpaceTimeRealign(runs, tr=21, slice_times='ascending', slice_info=1) assert_array_equal(R.slice_times, np.arange(21)) # Check slice_times and slice_info and TR required R = SpaceTimeRealign(runs, 3, 'ascending', 2) # OK pytest.raises(ValueError, SpaceTimeRealign, runs, 3, None, 2) pytest.raises(ValueError, SpaceTimeRealign, runs, 3, 'ascending', None) pytest.raises(ValueError, SpaceTimeRealign, IMS[0], None, [0, 1, 2], 2) pytest.raises(ValueError, SpaceTimeRealign, IMS[1], None, [0, 1, 2], 2) pytest.raises(ValueError, SpaceTimeRealign, IMS[2:4], None, [0, 1, 2], 2) pytest.raises(ValueError, SpaceTimeRealign, IMS[0], 'header-allow-1.0', [0, 1, 2], 2) R = SpaceTimeRealign(IMS[1], "header-allow-1.0", 'ascending', 2) assert_array_equal(R.tr, 1.0) # Test when TR and nslices are not the same R1 = SpaceTimeRealign(runs, tr=2., slice_times='ascending', slice_info=2) assert_array_equal(R1.slice_times, np.arange(3) / 3. * 2.) 
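    # That is, with TR == 2 and 3 slices, an ascending acquisition puts
    # slice k at k * TR / n_slices = (0, 2/3, 4/3) seconds.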
# Smoke test run R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') def reduced_dim(dim, subsampling, border): return max(1, int(np.ceil((dim - 2 * border) / float(subsampling)))) def test_lowlevel_params(): runs = [IM, IM] R = SpaceTimeRealign(runs, tr=21, slice_times='ascending', slice_info=1) borders=(3,2,1) R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest', borders=borders) # Test tighter borders for motion estimation r = Realign4dAlgorithm(R._runs[0], borders=borders) nvoxels = np.prod(np.array([reduced_dim(IM.shape[i], 1, borders[i]) for i in range(3)])) assert_array_equal(r.xyz.shape, (nvoxels, 3)) # Test wrong argument types raise errors pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], subsampling=(3,3,3,1)) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], refscan='first') pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], borders=(1,1,1,0)) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], xtol=None) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], ftol='dunno') pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], gtol=(.1,.1,.1)) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], stepsize=None) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], maxiter=None) pytest.raises(ValueError, Realign4dAlgorithm, R._runs[0], maxfun='none') def _test_make_grid(dims, subsampling, borders, expected_nvoxels): x = make_grid(dims, subsampling, borders) assert x.shape[0] == expected_nvoxels def test_make_grid_funfile(): dims = IM.shape[0:3] borders = (3,2,1) nvoxels = np.prod(np.array([reduced_dim(dims[i], 1, borders[i]) for i in range(3)])) _test_make_grid(dims, (1,1,1), borders, nvoxels) def test_make_grid_default(): dims = np.random.randint(100, size=3) + 1 _test_make_grid(dims, (1,1,1), (0,0,0), np.prod(dims)) def test_make_grid_random_subsampling(): dims = np.random.randint(100, size=3) + 1 subsampling = np.random.randint(5, size=3) + 1 nvoxels = np.prod(np.array([reduced_dim(dims[i], subsampling[i], 0) for i in range(3)])) _test_make_grid(dims, subsampling, (0,0,0), nvoxels) def test_make_grid_random_borders(): dims = np.random.randint(100, size=3) + 1 borders = np.minimum((dims - 1) / 2, np.random.randint(10, size=3)) nvoxels = np.prod(np.array([reduced_dim(dims[i], 1, borders[i]) for i in range(3)])) _test_make_grid(dims, (1,1,1), borders, nvoxels) def test_make_grid_full_monthy(): dims = np.random.randint(100, size=3) + 1 subsampling = np.random.randint(5, size=3) + 1 borders = np.minimum((dims - 1) / 2, np.random.randint(10, size=3)) nvoxels = np.prod(np.array([reduced_dim(dims[i], subsampling[i], borders[i]) for i in range(3)])) _test_make_grid(dims, subsampling, borders, nvoxels) def test_spacerealign(): # Check space-only realigner runs = [IM, IM] R = SpaceRealign(runs) assert R.tr == 1 assert R.slice_times == 0. 
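    # slice_times == 0 encodes 'no slice timing correction': SpaceRealign
    # estimates spatial motion only.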
# Smoke test run R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') def test_single_image(): # Check we can use a single image as argument R = SpaceTimeRealign(IM, tr=3, slice_times='ascending', slice_info=2) R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') R = SpaceRealign(IM) R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') R = Realign4d(IM, 3, [0, 1, 2], (2, 1)) R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') nipy-0.6.1/nipy/algorithms/registration/tests/test_histogram_registration.py000066400000000000000000000206741470056100100277040ustar00rootroot00000000000000 import numpy as np import pytest from numpy.testing import assert_array_equal from ....core.image.image_spaces import make_xyz_image from ....testing import assert_almost_equal from .._registration import _joint_histogram from ..affine import Affine, Rigid from ..histogram_registration import HistogramRegistration dummy_affine = np.eye(4) def make_data_bool(dx=100, dy=100, dz=50): return (np.random.rand(dx, dy, dz) - np.random.rand()) > 0 def make_data_uint8(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('uint8') def make_data_int16(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('int16') def make_data_float64(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('float64') def _test_clamping(I, thI=0.0, clI=256, mask=None): R = HistogramRegistration(I, I, from_bins=clI, from_mask=mask, to_mask=mask) R.subsample(spacing=[1, 1, 1]) Ic = R._from_data Ic2 = R._to_data[1:-1, 1:-1, 1:-1] assert_array_equal(Ic, Ic2) dyn = Ic.max() + 1 assert dyn == R._joint_hist.shape[0] assert dyn == R._joint_hist.shape[1] return Ic, Ic2 def test_clamping_uint8(): I = make_xyz_image(make_data_uint8(), dummy_affine, 'scanner') _test_clamping(I) def test_clamping_uint8_nonstd(): I = make_xyz_image(make_data_uint8(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def test_clamping_int16(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I) def test_masked_clamping_int16(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I, mask=make_data_bool()) def test_clamping_int16_nonstd(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def test_clamping_float64(): I = make_xyz_image(make_data_float64(), dummy_affine, 'scanner') _test_clamping(I) def test_clamping_float64_nonstd(): I = make_xyz_image(make_data_float64(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def _test_similarity_measure(simi, val): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_fdata().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) R.subsample(spacing=[2, 1, 3]) R.similarity = simi assert_almost_equal(R.eval(Affine()), val) def _test_renormalization1(simi): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') R = HistogramRegistration(I, I) R.subsample(spacing=[2, 1, 3]) R._set_similarity(simi, renormalize=True) assert R.eval(Affine()) > 1e5 def _test_renormalization2(simi): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') I0 = make_xyz_image(np.zeros(I.shape, dtype='int16'), dummy_affine, 'scanner') R = HistogramRegistration(I0, I) R.subsample(spacing=[2, 1, 3]) R._set_similarity(simi, renormalize=True) assert_almost_equal(R.eval(Affine()), 0) def 
test_correlation_coefficient(): _test_similarity_measure('cc', 1.0) def test_correlation_ratio(): _test_similarity_measure('cr', 1.0) def test_correlation_ratio_L1(): _test_similarity_measure('crl1', 1.0) def test_supervised_likelihood_ratio(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') R = HistogramRegistration(I, J, similarity='slr', dist=np.ones((256, 256)) / (256 ** 2)) assert_almost_equal(R.eval(Affine()), 0.0) pytest.raises(ValueError, HistogramRegistration, I, J, similarity='slr', dist=None) pytest.raises(ValueError, HistogramRegistration, I, J, similarity='slr', dist=np.random.rand(100, 127)) def test_normalized_mutual_information(): _test_similarity_measure('nmi', 1.0) def test_renormalized_correlation_coefficient(): _test_renormalization1('cc') _test_renormalization2('cc') def test_renormalized_correlation_ratio(): _test_renormalization1('cr') _test_renormalization2('cr') def test_renormalized_correlation_ratio_l1(): _test_renormalization1('crl1') _test_renormalization2('crl1') def test_joint_hist_eval(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_fdata().copy(), dummy_affine, 'scanner') # Obviously the data should be the same assert_array_equal(I.get_fdata(), J.get_fdata()) # Instantiate default thing R = HistogramRegistration(I, J) R.similarity = 'cc' null_affine = Affine() val = R.eval(null_affine) assert_almost_equal(val, 1.0) # Try with what should be identity R.subsample(spacing=[1, 1, 1]) assert_array_equal(R._from_data.shape, I.shape) val = R.eval(null_affine) assert_almost_equal(val, 1.0) def test_joint_hist_raw(): # Set up call to joint histogram jh_arr = np.zeros((10, 10), dtype=np.double) data_shape = (2, 3, 4) data = np.random.randint(size=data_shape, low=0, high=10).astype(np.short) data2 = np.zeros(np.array(data_shape) + 2, dtype=np.short) data2[:] = -1 data2[1:-1, 1:-1, 1:-1] = data.copy() vox_coords = np.indices(data_shape).transpose((1, 2, 3, 0)) vox_coords = np.ascontiguousarray(vox_coords.astype(np.double)) _joint_histogram(jh_arr, data.flat, data2, vox_coords, 0) assert_almost_equal(np.diag(np.diag(jh_arr)), jh_arr) def test_explore(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) T = Affine() simi, params = R.explore(T, (0, [-1, 0, 1]), (1, [-1, 0, 1])) def test_histogram_registration(): """ Test the histogram registration class. """ I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_fdata().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) pytest.raises(ValueError, R.subsample, spacing=[0, 1, 3]) def test_set_fov(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_fdata().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) R.set_fov(npoints=np.prod(I.shape)) assert R._from_data.shape == I.shape half_shape = tuple(I.shape[i] / 2 for i in range(3)) R.set_fov(spacing=(2, 2, 2)) assert R._from_data.shape == half_shape R.set_fov(corner=half_shape) assert R._from_data.shape == half_shape R.set_fov(size=half_shape) assert R._from_data.shape == half_shape def test_histogram_masked_registration(): """ Test the histogram registration class. 
""" I = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') J = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') mask = (np.zeros((100, 100, 50)) == 1) mask[10:20, 10:20, 10:20] = True R = HistogramRegistration(I, J, to_mask=mask, from_mask=mask) sim1 = R.eval(Affine()) I = make_xyz_image(I.get_fdata()[mask].reshape(10, 10, 10), dummy_affine, 'scanner') J = make_xyz_image(J.get_fdata()[mask].reshape(10, 10, 10), dummy_affine, 'scanner') R = HistogramRegistration(I, J) sim2 = R.eval(Affine()) assert sim1 == sim2 def test_similarity_derivatives(): """ Test gradient and Hessian computation of the registration objective function. """ I = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') J = make_xyz_image(np.ones((100, 100, 50), dtype='int16'), dummy_affine, 'scanner') R = HistogramRegistration(I, J) T = Rigid() g = R.eval_gradient(T) assert g.dtype == float assert_array_equal(g, np.zeros(6)) H = R.eval_hessian(T) assert H.dtype == float assert_array_equal(H, np.zeros((6, 6))) def test_smoothing(): """ Test smoothing the `to` image. """ I = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') T = Rigid() R = HistogramRegistration(I, I) R1 = HistogramRegistration(I, I, smooth=1) s = R.eval(T) s1 = R1.eval(T) assert_almost_equal(s, 1) assert s1 < s pytest.raises(ValueError, HistogramRegistration, I, I, smooth=-1) nipy-0.6.1/nipy/algorithms/registration/tests/test_polyaffine.py000066400000000000000000000017221470056100100252420ustar00rootroot00000000000000import numpy as np from ..affine import Affine from ..polyaffine import PolyAffine def random_affine(): T = np.eye(4) T[0:3, 0:4] = np.random.rand(3, 4) return T def id_affine(): return np.eye(4) NCENTERS = 5 NPTS = 100 centers = [np.random.rand(3) for i in range(NCENTERS)] raf = random_affine() affines = [raf for i in range(NCENTERS)] #affines = [id_affine() for i in range(NCENTERS)] sigma = 1.0 xyz = np.random.rand(NPTS, 3) # test 1: crash test create polyaffine transform T = PolyAffine(centers, affines, sigma) # test 2: crash test apply method t = T.apply(xyz) # test 3: check apply does nice job c = np.array(centers) tc = T.apply(c) qc = np.array([np.dot(a[0:3, 0:3], b) + a[0:3, 3]\ for a, b in zip(affines, centers)]) # test 4: crash test compose method A = Affine(random_affine()) TA = T.compose(A) # test 5: crash test left compose method AT = A.compose(T) z = AT.apply(xyz) za = A.compose(Affine(raf)).apply(xyz) nipy-0.6.1/nipy/algorithms/registration/tests/test_register.py000066400000000000000000000025261470056100100247350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.testing import assert_array_almost_equal from .... 
import load_image from ....testing import anatfile from ..histogram_registration import HistogramRegistration anat_img = load_image(anatfile) def test_registers(): # Test registration to self returns identity for cost, interp, affine_type in (('cc', 'pv', 'rigid'), ('cc', 'tri', 'rigid'), ('cc', 'rand', 'rigid'), ('cc', 'pv', 'similarity'), ('cc', 'pv', 'affine'), ('cr', 'pv', 'rigid'), ('cr', 'pv', 'rigid'), ('crl1', 'pv', 'rigid'), ('mi', 'pv', 'rigid'), ('nmi', 'pv', 'rigid'), ): R = HistogramRegistration(anat_img, anat_img, similarity=cost, interp=interp) R.subsample([2,2,2]) affine = R.optimize(affine_type) assert_array_almost_equal(affine.as_affine(), np.eye(4), 2) nipy-0.6.1/nipy/algorithms/registration/tests/test_resample.py000066400000000000000000000101251470056100100247130ustar00rootroot00000000000000""" Testing resample function """ import numpy as np from nibabel.affines import apply_affine from numpy.testing import assert_array_almost_equal, assert_array_equal from ....core.api import Image, vox2mni from ....core.image.image_spaces import as_xyz_image, xyz_affine from ..affine import Affine from ..resample import cast_array, resample from ..transform import Transform AUX = np.array([-1.9, -1.2, -1, 2.3, 2.9, 19, 100, 258, 258.2, 258.8, 1e5]) def test_cast_array_float(): assert_array_equal(cast_array(AUX, np.dtype(float)), AUX) def test_cast_array_int8(): assert_array_equal(cast_array(AUX, np.dtype('int8')), [-2, -1, -1, 2, 3, 19, 100, 127, 127, 127, 127]) def test_cast_array_uint8(): assert_array_equal(cast_array(AUX, np.dtype('uint8')), [0, 0, 0, 2, 3, 19, 100, 255, 255, 255, 255]) def test_cast_array_int16(): assert_array_equal(cast_array(AUX, np.dtype('int16')), [-2, -1, -1, 2, 3, 19, 100, 258, 258, 259, 2**15 - 1]) def test_cast_array_uint16(): assert_array_equal(cast_array(AUX, np.dtype('uint16')), [0, 0, 0, 2, 3, 19, 100, 258, 258, 259, 2**16 - 1]) def test_cast_array_int32(): assert_array_equal(cast_array(AUX, np.dtype('int32')), np.round(AUX)) def test_cast_array_uint32(): assert_array_equal(cast_array(AUX, np.dtype('uint32')), np.maximum(np.round(AUX), 0)) def _test_resample(arr, T, interp_orders): # Check basic cases of resampling img = Image(arr, vox2mni(np.eye(4))) for i in interp_orders: img2 = resample(img, T, interp_order=i) assert_array_almost_equal(img2.get_fdata(), img.get_fdata()) img_aff = as_xyz_image(img) img2 = resample(img, T, reference=(img_aff.shape, xyz_affine(img_aff)), interp_order=i) assert_array_almost_equal(img2.get_fdata(), img.get_fdata()) def test_resample_dtypes(): for arr in (np.random.rand(10, 11, 12), np.random.randint(100, size=(10, 11, 12)) - 50): _test_resample(arr, Affine(), (0, 1, 3, 5)) _test_resample(arr, Transform(lambda x : x), (0, 1, 3, 5)) class ApplyAffine(Transform): """ Class implements Transform protocol for testing affine Transforms """ def __init__(self, aff): self.func = lambda pts : apply_affine(aff, pts) def test_resample_uint_data(): arr = np.random.randint(100, size=(10, 11, 12)).astype('uint8') img = Image(arr, vox2mni(np.eye(4))) aff_obj = Affine((.5, .5, .5, .1, .1, .1, 0, 0, 0, 0, 0, 0)) for transform in aff_obj, ApplyAffine(aff_obj.as_affine()): img2 = resample(img, transform) assert(np.min(img2.get_fdata()) >= 0) assert(np.max(img2.get_fdata()) < 255) def test_resample_outvalue(): arr = np.arange(3*3*3).reshape(3,3,3) img = Image(arr, vox2mni(np.eye(4))) aff = np.eye(4) aff[0,3] = 1. 
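    # A one-voxel translation along axis 0: each output voxel i samples
    # input voxel i + 1, so the last slice falls outside the input grid and
    # is filled according to `mode`/`cval`, as checked below.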
for transform in (aff, ApplyAffine(aff)): for order in (1, 3): # Default interpolation outside is constant == 0 img2 = resample(img, transform, interp_order=order) arr2 = img2.get_fdata() exp_arr = np.zeros_like(arr) exp_arr[:-1,:,:] = arr[1:,:,:] assert_array_equal(arr2, exp_arr) # Test explicit constant value of 0 img2 = resample(img, transform, interp_order=order, mode='constant', cval=0.) exp_arr = np.zeros(arr.shape) exp_arr[:-1, :, :] = arr[1:, :, :] assert_array_almost_equal(img2.get_fdata(), exp_arr) # Test constant value of 1 img2 = resample(img, transform, interp_order=order, mode='constant', cval=1.) exp_arr[-1, :, :] = 1 assert_array_almost_equal(img2.get_fdata(), exp_arr) # Test nearest neighbor img2 = resample(img, transform, interp_order=order, mode='nearest') exp_arr[-1, :, :] = arr[-1, :, :] assert_array_almost_equal(img2.get_fdata(), exp_arr) nipy-0.6.1/nipy/algorithms/registration/tests/test_scripting.py000066400000000000000000000030201470056100100251010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import gc from os.path import split as psplit import nibabel.eulerangles as euler import numpy as np import numpy.testing as npt import nipy.algorithms.registration as reg from nipy.io.api import load_image, save_image from nipy.testing import funcfile def test_space_time_realign(in_tmp_path): path, fname = psplit(funcfile) original_affine = load_image(funcfile).affine path, fname = psplit(funcfile) froot, _ = fname.split('.', 1) # Make another image with .nii extension and extra dot in filename save_image(load_image(funcfile), 'my.test.nii') for in_fname, out_fname in ((funcfile, froot + '_mc.nii.gz'), ('my.test.nii', 'my.test_mc.nii.gz')): xforms = reg.space_time_realign(in_fname, 2.0, out_name='.') assert np.allclose(xforms[0].as_affine(), np.eye(4), atol=1e-7) assert not np.allclose(xforms[-1].as_affine(), np.eye(4), atol=1e-3) img = load_image(out_fname) npt.assert_almost_equal(original_affine, img.affine) # To avoid Windows "file ... used by another process" error when # removing temporary directory. 
gc.collect() def test_aff2euler(): xr = 0.1 yr = -1.3 zr = 3.1 scales = (2.1, 3.2, 4.4) R = np.dot(euler.euler2mat(xr, yr, zr), np.diag(scales)) aff = np.eye(4) aff[:3, :3] = R aff[:3, 3] = [11, 12, 13] npt.assert_almost_equal(reg.aff2euler(aff), (xr, yr, zr)) nipy-0.6.1/nipy/algorithms/registration/tests/test_slice_timing.py000066400000000000000000000065011470056100100255540ustar00rootroot00000000000000 import numpy as np from numpy.testing import assert_almost_equal, assert_array_equal from scipy.ndimage import gaussian_filter, gaussian_filter1d from nipy.core.api import Image, vox2scanner from ..groupwise_registration import SpaceTimeRealign def check_stc(true_signal, corrected_signal, ref_slice=0, rtol=1e-5, atol=1e-5): n_slices = true_signal.shape[2] # The reference slice should be more or less perfect assert_almost_equal( corrected_signal[..., ref_slice, :], true_signal[..., ref_slice, :]) # The other slices should be more or less right for sno in range(n_slices): if sno == ref_slice: continue # We checked this one arr0 = true_signal[..., sno, 1:-1] arr1 = corrected_signal[..., sno, 1:-1] # Intermediate test matrices for debugging abs_diff = np.abs(arr0 - arr1) rel_diff = np.abs((arr0 / arr1) - 1) abs_fails = abs_diff > atol rel_fails = rel_diff > rtol fails = abs_fails & rel_fails abs_only = abs_diff[fails] rel_only = rel_diff[fails] assert np.allclose(arr0, arr1, rtol=rtol, atol=atol) def test_slice_time_correction(): # Make smooth time course at slice resolution TR = 2. n_vols = 25 n_slices = 10 # Create single volume shape_3d = (20, 30, n_slices) spatial_sigma = 4 time_sigma = n_slices * 5 # time sigma in TRs one_vol = np.random.normal(100, 25, size=shape_3d) gaussian_filter(one_vol, spatial_sigma, output=one_vol) # Add smoothed time courses. Time courses are at time resolution of one # slice time. So, there are n_slices time points per TR. n_vol_slices = n_slices * n_vols time_courses = np.random.normal(0, 15, size=shape_3d + (n_vol_slices,)) gaussian_filter1d(time_courses, time_sigma, output=time_courses) big_data = one_vol[..., None] + time_courses # Can the first time point be approximated from the later ones? 
first_signal = big_data[..., 0:n_vol_slices:n_slices] for name, time_to_slice in ( ('ascending', list(range(n_slices))), ('descending', list(range(n_slices)[::-1])), ('asc_alt_2', (list(range(0, n_slices, 2)) + list(range(1, n_slices, 2)))), ('desc_alt_2', (list(range(0, n_slices, 2)) + list(range(1, n_slices, 2)))[::-1]) ): slice_to_time = np.argsort(time_to_slice) acquired_signal = np.zeros_like(first_signal) for space_sno, time_sno in enumerate(slice_to_time): acquired_signal[..., space_sno, :] = \ big_data[..., space_sno, time_sno:n_vol_slices:n_slices] # do STC - minimizer will fail acquired_image = Image(acquired_signal, vox2scanner(np.eye(5))) stc = SpaceTimeRealign(acquired_image, TR, name, 2) stc.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') # Check no motion estimated assert_array_equal([t.param for t in stc._transforms[0]], 0) corrected = stc.resample()[0].get_fdata() # check we approximate first time slice with correction assert not np.allclose(acquired_signal, corrected, rtol=1e-3, atol=0.1) check_stc(first_signal, corrected, ref_slice=slice_to_time[0], rtol=5e-4, atol=1e-6) nipy-0.6.1/nipy/algorithms/registration/tests/test_transform.py000066400000000000000000000014231470056100100251170ustar00rootroot00000000000000""" Testing """ import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from ..transform import Transform def test_transform(): t = Transform(lambda x : x+1) pts = np.random.normal(size=(10,3)) assert_array_equal(t.apply(pts), pts+1) pytest.raises(AttributeError, getattr, t, 'param') tm1 = Transform(lambda x : x-1) assert_array_equal(tm1.apply(pts), pts-1) tctm1 = t.compose(tm1) assert_array_almost_equal(tctm1.apply(pts), pts) def test_transform_other_init(): # Test we can have another init for our transform class C(Transform): def __init__(self): self.func = lambda x : x + 1 pts = np.random.normal(size=(10,3)) assert_array_equal(C().apply(pts), pts+1) nipy-0.6.1/nipy/algorithms/registration/transform.py000066400000000000000000000023061470056100100227170ustar00rootroot00000000000000""" Generic transform class This implementation specifies an API. We've done our best to avoid checking instances, so any class implementing this API should be valid in the places (like registration routines) that use transforms. If that isn't true, it's a bug. """ class Transform: """ A default transformation class This class specifies the tiny API. That is, the class should implement: * obj.param - the transformation exposed as a set of parameters. Changing param should change the transformation * obj.apply(pts) - accepts (N,3) array-like of points in 3 dimensions, returns an (N, 3) array of transformed points * obj.compose(xform) - accepts another object implementing ``apply``, and returns a new transformation object, where the resulting transformation is the composition of the ``obj`` transform onto the ``xform`` transform. 
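    A minimal usage sketch (mirroring tests/test_transform.py)::

        t = Transform(lambda pts: pts + 1)
        shifted = t.apply(pts)                                # pts + 1
        roundtrip = t.compose(Transform(lambda pts: pts - 1))
        roundtrip.apply(pts)                                  # back to pts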
""" def __init__(self, func): self.func = func def apply(self, pts): return self.func(pts) def compose(self, other): return Transform( lambda pts : self.apply(other.apply(pts))) @property def param(self): raise AttributeError('No param for generic transform') nipy-0.6.1/nipy/algorithms/registration/type_check.py000066400000000000000000000033111470056100100230170ustar00rootroot00000000000000""" Utilities to test whether a variable is of, or convertible to, a particular type """ import numpy as np def _check_type(x, t): try: y = t(x) return True except: return False def check_type(x, t, accept_none=False): """ Checks whether a variable is convertible to a certain type. A ValueError is raised if test fails. Parameters ---------- x : object Input argument to be checked. t : type Target type. accept_none : bool If True, skip errors if `x` is None. """ if accept_none: if x is None: return if not _check_type(x, t): raise ValueError(f'Argument should be convertible to {t}') def check_type_and_shape(x, t, s, accept_none=False): """ Checks whether a sequence is convertible to a numpy ndarray with given shape, and if the elements are convertible to a certain type. A ValueError is raised if test fails. Parameters ---------- x : sequence Input sequence to be checked. t : type Target element-wise type. s : sequence of ints Target shape. accept_none : bool If True, skip errors if `x` is None. """ if accept_none: if x is None: return try: shape = (int(s), ) except: shape = tuple(s) try: y = np.asarray(x) ok_type = _check_type(y[0], t) ok_shape = (y.shape == shape) except: raise ValueError('Argument should be convertible to ndarray') if not ok_type: raise ValueError(f'Array values should be convertible to {t}') if not ok_shape: raise ValueError(f'Array shape should be equivalent to {shape}') nipy-0.6.1/nipy/algorithms/registration/wichmann_prng.c000066400000000000000000000022141470056100100233260ustar00rootroot00000000000000#include "wichmann_prng.h" #include /* Assumption to be verified: ix, iy, iz, it should be set to values between 1 and 400000 */ void prng_seed(int seed, prng_state* rng) { double r, rmax=(double)RAND_MAX; int imax = 400000; srand(seed); r = (double)rand()/rmax; rng->ix = (int)(imax*r); r = (double)rand()/rmax; rng->iy = (int)(imax*r); r = (double)rand()/rmax; rng->iz = (int)(imax*r); r = (double)rand()/rmax; rng->it = (int)(imax*r); return; } double prng_double(prng_state* rng) { double W; rng->ix = 11600 * (rng->ix % 185127) - 10379 * (rng->ix / 185127); rng->iy = 47003 * (rng->iy % 45688) - 10479 * (rng->iy / 45688); rng->iz = 23000 * (rng->iz % 93368) - 19423 * (rng->iz / 93368); rng->it = 33000 * (rng->it % 65075) - 8123 * (rng->it / 65075); if (rng->ix < 0) rng->ix = rng->ix + 2147483579; if (rng->iy < 0) rng->iy = rng->iy + 2147483543; if (rng->iz < 0) rng->iz = rng->iz + 2147483423; if (rng->it < 0) rng->it = rng->it + 2147483123; W = rng->ix/2147483579. + rng->iy/2147483543. + rng->iz/2147483423. + rng->it/2147483123.; return W - (int)W; } nipy-0.6.1/nipy/algorithms/registration/wichmann_prng.h000066400000000000000000000010371470056100100233350ustar00rootroot00000000000000#ifndef WICHMANN_PRNG #define WICHMANN_PRNG #ifdef __cplusplus extern "C" { #endif /* B.A. Wichmann, I.D. Hill, Generating good pseudo-random numbers, Computational Statistics & Data Analysis, Volume 51, Issue 3, 1 December 2006, Pages 1614-1622, ISSN 0167-9473, DOI: 10.1016/j.csda.2006.05.019. 
*/ typedef struct { int ix; int iy; int iz; int it; } prng_state; extern void prng_seed(int seed, prng_state* rng); extern double prng_double(prng_state* prng); #ifdef __cplusplus } #endif #endif nipy-0.6.1/nipy/algorithms/resample.py000066400000000000000000000134201470056100100200010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Some simple examples and utility functions for resampling. """ import copy import numpy as np from nibabel.affines import from_matvec, to_matvec from scipy.ndimage import affine_transform from ..core.api import AffineTransform, ArrayCoordMap, CoordinateMap, Image, compose from .interpolation import ImageInterpolator def resample_img2img(source, target, order=3, mode='constant', cval=0.0): """ Resample `source` image to space of `target` image This wraps the resample function to resample one image onto another. The output of the function will give an image with shape of the target and data from the source. Parameters ---------- source : ``Image`` Image instance that is to be resampled target : ``Image`` Image instance to which source is resampled. The output image will have the same shape as the target, and the same coordmap. order : ``int``, optional What order of interpolation to use in ``scipy.ndimage``. mode : str, optional Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'. cval : scalar, optional Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. Returns ------- output : ``Image`` Image with interpolated data and output.coordmap == target.coordmap Examples -------- >>> from nipy.testing import funcfile, anatfile >>> from nipy.io.api import load_image >>> aimg_source = load_image(anatfile) >>> aimg_target = aimg_source >>> # in this case, we resample aimg to itself >>> resimg = resample_img2img(aimg_source, aimg_target) """ sip, sop = source.coordmap.ndims tip, top = target.coordmap.ndims #print sip, sop, tip, top if sop != top: raise ValueError("source coordmap output dimension not equal " "to target coordmap output dimension") mapping = np.eye(sop+1) # this would usually be 3+1 resimg = resample(source, target.coordmap, mapping, target.shape, order=order, mode=mode, cval=cval) return resimg def resample(image, target, mapping, shape, order=3, mode='constant', cval=0.0): """ Resample `image` to `target` CoordinateMap Use a "world-to-world" mapping `mapping` and spline interpolation of a `order`. Here, "world-to-world" refers to the fact that mapping should be a callable that takes a physical coordinate in "target" and gives a physical coordinate in "image". Parameters ---------- image : Image instance image that is to be resampled. target : CoordinateMap coordinate map for output image. mapping : callable or tuple or array transformation from target.function_range to image.coordmap.function_range, i.e. 'world-to-world mapping'. Can be specified in three ways: a callable, a tuple (A, b) representing the mapping y=dot(A,x)+b or a representation of this mapping as an affine array, in homogeneous coordinates. shape : sequence of int shape of output array, in target.function_domain. order : int, optional what order of interpolation to use in ``scipy.ndimage``. mode : str, optional Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). 
Default is 'constant'. cval : scalar, optional Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. Returns ------- output : Image instance Image has interpolated data and output.coordmap == target. """ if not callable(mapping): if type(mapping) is type(()): mapping = from_matvec(*mapping) # image world to target world mapping TW2IW = AffineTransform(target.function_range, image.coordmap.function_range, mapping) else: if isinstance(mapping, AffineTransform): TW2IW = mapping else: TW2IW = CoordinateMap(target.function_range, image.coordmap.function_range, mapping) # target voxel to image world mapping TV2IW = compose(TW2IW, target) # CoordinateMap describing mapping from target voxel to # image world coordinates if not isinstance(TV2IW, AffineTransform): # interpolator evaluates image at values image.coordmap.function_range, # i.e. physical coordinates rather than voxel coordinates grid = ArrayCoordMap.from_shape(TV2IW, shape) interp = ImageInterpolator(image, order=order, mode=mode, cval=cval) idata = interp.evaluate(grid.transposed_values) del(interp) else: # it is an affine transform, but, what if we compose? TV2IV = compose(image.coordmap.inverse(), TV2IW) if isinstance(TV2IV, AffineTransform): # still affine A, b = to_matvec(TV2IV.affine) idata = affine_transform(image.get_fdata(), A, offset=b, output_shape=shape, order=order, mode=mode, cval=cval) else: # not affine anymore interp = ImageInterpolator(image, order=order, mode=mode, cval=cval) grid = ArrayCoordMap.from_shape(TV2IV, shape) idata = interp.evaluate(grid.values) del(interp) return Image(idata, copy.copy(target)) nipy-0.6.1/nipy/algorithms/segmentation/000077500000000000000000000000001470056100100203145ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/segmentation/__init__.py000066400000000000000000000001551470056100100224260ustar00rootroot00000000000000 from .brain_segmentation import BrainT1Segmentation from .segmentation import Segmentation, moment_matching nipy-0.6.1/nipy/algorithms/segmentation/_segmentation.h000066400000000000000000000000671470056100100233240ustar00rootroot00000000000000#define PY_ARRAY_UNIQUE_SYMBOL _segmentation_ARRAY_API nipy-0.6.1/nipy/algorithms/segmentation/_segmentation.pyx000066400000000000000000000047721470056100100237240ustar00rootroot00000000000000# -*- Mode: Python -*- """ Markov random field utils. Author: Alexis Roche, 2010. 
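Wraps the C routines in mrf.c: a variational E-step for Markov random
field segmentation (_ve_step), construction of the edge list of a masked
lattice (_make_edges), and evaluation of the Markov interaction energy
(_interaction_energy).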
""" __version__ = '0.2' # Set symbol for array_import; must come before cimport numpy cdef extern from "_segmentation.h": int PY_ARRAY_UNIQUE_SYMBOL # Includes from numpy cimport import_array, ndarray # Externals cdef extern from "mrf.h": void ve_step(ndarray ppm, ndarray ref, ndarray XYZ, ndarray U, int ngb_size, double beta) ndarray make_edges(ndarray mask, int ngb_size) double interaction_energy(ndarray ppm, ndarray XYZ, ndarray U, int ngb_size) # Initialize numpy import_array() import numpy as np def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': raise ValueError('ppm array should be double C-contiguous') if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': raise ValueError('ref array should be double C-contiguous') if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': raise ValueError('XYZ array should be intp C-contiguous') if not XYZ.shape[1] == 3: raise ValueError('XYZ array should be 3D') if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': raise ValueError('U array should be double C-contiguous') if not ppm.shape[-1] == ref.shape[-1]: raise ValueError('Inconsistent shapes for ppm and ref arrays') ve_step(ppm, ref, XYZ, U, ngb_size, beta) return ppm def _make_edges(mask, int ngb_size): if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': raise ValueError('mask array should be intp and C-contiguous') return make_edges(mask, ngb_size) def _interaction_energy(ppm, XYZ, U, int ngb_size): if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': raise ValueError('ppm array should be double C-contiguous') if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': raise ValueError('XYZ array should be intp C-contiguous') if not XYZ.shape[1] == 3: raise ValueError('XYZ array should be 3D') if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': raise ValueError('U array should be double C-contiguous') return interaction_energy(ppm, XYZ, U, ngb_size) nipy-0.6.1/nipy/algorithms/segmentation/brain_segmentation.py000066400000000000000000000100621470056100100245350ustar00rootroot00000000000000import numpy as np from .segmentation import Segmentation, map_from_ppm, moment_matching T1_ref_params = {} T1_ref_params['glob_mu'] = 1643.2 T1_ref_params['glob_sigma'] = 252772.3 T1_ref_params['3k'] = { 'mu': np.array([813.9, 1628.3, 2155.8]), 'sigma': np.array([46499.0, 30233.4, 17130.0])} T1_ref_params['4k'] = { 'mu': np.array([816.1, 1613.7, 1912.3, 2169.3]), 'sigma': np.array([47117.6, 27053.8, 8302.2, 14970.8])} T1_ref_params['5k'] = { 'mu': np.array([724.2, 1169.3, 1631.5, 1917.0, 2169.2]), 'sigma': np.array([22554.8, 21368.9, 20560.1, 7302.6, 14962.1])} class BrainT1Segmentation: def __init__(self, data, mask=None, model='3k', niters=25, ngb_size=6, beta=0.5, ref_params=None, init_params=None, convert=True): self.labels = ('CSF', 'GM', 'WM') self.data = data self.mask = mask mixmat = np.asarray(model) if mixmat.ndim == 2: nclasses = mixmat.shape[0] if nclasses < 3: raise ValueError('at least 3 classes required') if not mixmat.shape[1] == 3: raise ValueError('mixing matrix should have 3 rows') self.mixmat = mixmat elif model == '3k': self.mixmat = np.eye(3) elif model == '4k': self.mixmat = np.array([[1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]) elif model == '5k': self.mixmat = np.array([[1., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]) else: raise ValueError('unknown brain segmentation model') self.niters = int(niters) self.beta = float(beta) self.ngb_size = 
int(ngb_size) # Class parameter initialization if init_params is None: if ref_params is None: ref_params = T1_ref_params self.init_mu, self.init_sigma = self._init_parameters(ref_params) else: self.init_mu = np.array(init_params[0], dtype='double') self.init_sigma = np.array(init_params[1], dtype='double') if not len(self.init_mu) == self.mixmat.shape[0]\ or not len(self.init_sigma) == self.mixmat.shape[0]: raise ValueError('Inconsistent initial parameter estimates') self._run() if convert: self.convert() else: self.label = map_from_ppm(self.ppm, self.mask) def _init_parameters(self, ref_params): if self.mask is not None: data = self.data[self.mask] else: data = self.data nclasses = self.mixmat.shape[0] if nclasses <= 5: key = str(self.mixmat.shape[0]) + 'k' ref_mu = ref_params[key]['mu'] ref_sigma = ref_params[key]['sigma'] else: ref_mu = np.linspace(ref_params['3k']['mu'][0], ref_params['3k']['mu'][-1], num=nclasses) ref_sigma = np.linspace(ref_params['3k']['sigma'][0], ref_params['3k']['sigma'][-1], num=nclasses) return moment_matching(data, ref_mu, ref_sigma, ref_params['glob_mu'], ref_params['glob_sigma']) def _run(self): S = Segmentation(self.data, mask=self.mask, mu=self.init_mu, sigma=self.init_sigma, ngb_size=self.ngb_size, beta=self.beta) S.run(niters=self.niters) self.mu = S.mu self.sigma = S.sigma self.ppm = S.ppm def convert(self): if self.ppm.shape[-1] == self.mixmat.shape[0]: self.ppm = np.dot(self.ppm, self.mixmat) self.label = map_from_ppm(self.ppm, self.mask) nipy-0.6.1/nipy/algorithms/segmentation/meson.build000066400000000000000000000007501470056100100224600ustar00rootroot00000000000000target_dir = 'nipy/algorithms/segmentation' py.extension_module('_segmentation', [ cython_gen.process('_segmentation.pyx'), 'mrf.c', ], c_args: cython_c_args, include_directories: ['.', incdir_numpy], install: true, subdir: target_dir ) python_sources = [ '__init__.py', 'brain_segmentation.py', 'segmentation.py' ] py.install_sources( python_sources, pure: false, subdir: target_dir ) install_subdir('tests', install_dir: install_root / target_dir) nipy-0.6.1/nipy/algorithms/segmentation/mrf.c000066400000000000000000000226501470056100100212510ustar00rootroot00000000000000#include "mrf.h" #include #include #ifdef _MSC_VER #define inline __inline #endif /* Encode neighborhood systems using static arrays */ int ngb6 [] = {1,0,0, -1,0,0, 0,1,0, 0,-1,0, 0,0,1, 0,0,-1}; int ngb26 [] = {1,0,0, -1,0,0, 0,1,0, 0,-1,0, 1,1,0, -1,-1,0, 1,-1,0, -1,1,0, 1,0,1, -1,0,1, 0,1,1, 0,-1,1, 1,1,1, -1,-1,1, 1,-1,1, -1,1,1, 1,0,-1, -1,0,-1, 0,1,-1, 0,-1,-1, 1,1,-1, -1,-1,-1, 1,-1,-1, -1,1,-1, 0,0,1, 0,0,-1}; static int* _select_neighborhood_system(int ngb_size) { if (ngb_size == 6) return ngb6; else if (ngb_size == 26) return ngb26; else { fprintf(stderr, "Unknown neighborhood system\n"); return NULL; } } /* Perform the VE-step of a VEM algorithm for a general Markov random field segmentation model. Compute exp[-2 * beta * SUM_j (U * qj)] for a given voxel, where the sum is on the neighbors. 
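   Here beta is the Markov regularization parameter, U is the K x K
   class interaction matrix (K the number of classes) and qj is the
   posterior class probability vector stored in ppm at neighbor j, so
   the sum over the neighbors yields one un-normalized weight per class.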
ppm assumed C-contiguous double (X, Y, Z, K) ref assumed C-contiguous double (NPTS, K) XYZ assumed C-contiguous npy_intp (NPTS, 3) */ #define TINY 1e-300 /* Compute neighborhood 'agreement' term required by the VE-step at a particular voxel */ static void _ngb_integrate(double* res, const PyArrayObject* ppm, npy_intp x, npy_intp y, npy_intp z, const double* U, const int* ngb, npy_intp ngb_size) { npy_intp xn, yn, zn, pos, ngb_idx, k, kk; const int* buf_ngb; /* Since PyArray_DATA() and PyArray_DIMS() are simple accessors, it is OK to * cast away const as long as we treat the results as const. */ const double* ppm_data = PyArray_DATA((PyArrayObject*) ppm); const npy_intp* dim_ppm = PyArray_DIMS((PyArrayObject*) ppm); double *buf, *buf_ppm, *q, *buf_U; npy_intp K = dim_ppm[3]; npy_intp u2 = dim_ppm[2]*K; npy_intp u1 = dim_ppm[1]*u2; npy_intp posmax = dim_ppm[0]*u1 - K; /* Re-initialize output array */ memset((void*)res, 0, K*sizeof(double)); /* Loop over neighbors */ buf_ngb = ngb; for (ngb_idx=0; ngb_idx posmax)) continue; /* Compute U*q */ buf_ppm = (double*)ppm_data + pos; for (k=0, buf=res, buf_U=(double*)U; kindex < iter->size) { /* Integrate the energy over the neighborhood */ xyz = PyArray_ITER_DATA(iter); x = xyz[0]; y = xyz[1]; z = xyz[2]; _ngb_integrate(p, ppm, x, y, z, U_data, (const int*)ngb, ngb_size); /* Apply exponential transform, multiply with reference and compute normalization constant */ psum = 0.0; for (k=0, pos=(iter->index)*K, buf=p; k TINY) for (k=0, buf=p; kindex < iter->size) { buf_idx = PyArray_ITER_DATA(iter); if (*buf_idx >= 0) mask_size ++; PyArray_ITER_NEXT(iter); } /* Allocate the array of edges using an upper bound of the required memory space */ edges_data = (npy_intp*)malloc(2 * ngb_size * mask_size * sizeof(npy_intp)); /* Second loop over the input array */ PyArray_ITER_RESET(iter); iter->contiguous = 0; /* To force coordinates to be updated */ buf_edges = edges_data; while(iter->index < iter->size) { xi = iter->coordinates[0]; yi = iter->coordinates[1]; zi = iter->coordinates[2]; buf_idx = PyArray_ITER_DATA(iter); idx_i = *buf_idx; /* Loop over neighbors if current point is within the mask */ if (idx_i >= 0) { buf_ngb = ngb; for (ngb_idx=0; ngb_idx= u0)) continue; /* Since PyArray_DATA() is a simple accessor, it is OK to cast away * const as long as we treat the result as const. */ buf_idx = PyArray_DATA((PyArrayObject*) idx) + pos; if (*buf_idx < 0) continue; buf_edges[0] = idx_i; buf_edges[1] = *buf_idx; n_edges ++; buf_edges += 2; } } /* Increment iterator */ PyArray_ITER_NEXT(iter); } /* Reallocate edges array to account for connections suppressed due to masking */ edges_data = realloc((void *)edges_data, 2 * n_edges * sizeof(npy_intp)); dim[0] = n_edges; edges = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_INTP, (void*)edges_data); /* Transfer ownership to python (to avoid memory leaks!) */ PyArray_ENABLEFLAGS(edges, NPY_ARRAY_OWNDATA); /* Free memory */ Py_XDECREF(iter); return edges; } /* Compute the interaction energy: sum_i,j qi^T U qj = sum_i qi^T sum_j U qj */ double interaction_energy(PyArrayObject* ppm, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size) { npy_intp k, x, y, z, pos; double *p, *buf; double res = 0.0, tmp; PyArrayIterObject* iter; int axis = 1; double* ppm_data; npy_intp K = PyArray_DIMS(ppm)[3]; npy_intp u2 = PyArray_DIMS(ppm)[2]*K; npy_intp u1 = PyArray_DIMS(ppm)[1]*u2; const npy_intp* xyz; /* Since PyArray_DATA() is a simple accessor, it is OK to cast away const as * long as we treat the result as const. 
*/ const double* U_data = PyArray_DATA((PyArrayObject*) U); int* ngb; /* Neighborhood system */ ngb = _select_neighborhood_system(ngb_size); /* Pointer to ppm array */ ppm_data = PyArray_DATA(ppm); /* Allocate auxiliary vector */ p = (double*)calloc(K, sizeof(double)); /* Loop over points */ /* We can convert idx to a non-const PyObject for iteration purposes as long * as we treat any pointer values obtained via the iterator as const. */ iter = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)XYZ, &axis); while(iter->index < iter->size) { /* Compute the average ppm in the neighborhood */ xyz = PyArray_ITER_DATA(iter); x = xyz[0]; y = xyz[1]; z = xyz[2]; _ngb_integrate(p, ppm, x, y, z, U_data, (const int*)ngb, ngb_size); /* Calculate the dot product qi^T p where qi is the local posterior */ tmp = 0.0; pos = x*u1 + y*u2 + z*K; for (k=0, buf=p; k /* * Use extension numpy symbol table */ #define NO_IMPORT_ARRAY #include "_segmentation.h" #include extern void ve_step(PyArrayObject* ppm, const PyArrayObject* ref, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size, double beta); extern PyArrayObject* make_edges(const PyArrayObject* mask, int ngb_size); extern double interaction_energy(PyArrayObject* ppm, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size); #ifdef __cplusplus } #endif #endif nipy-0.6.1/nipy/algorithms/segmentation/segmentation.py000066400000000000000000000201411470056100100233610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ._segmentation import _interaction_energy, _ve_step NITERS = 10 NGB_SIZE = 26 BETA = 0.1 nonzero = lambda x: np.maximum(x, 1e-50) log = lambda x: np.log(nonzero(x)) class Segmentation: def __init__(self, data, mask=None, mu=None, sigma=None, ppm=None, prior=None, U=None, ngb_size=NGB_SIZE, beta=BETA): """ Class for multichannel Markov random field image segmentation using the variational EM algorithm. For details regarding the underlying algorithm, see: Roche et al, 2011. On the convergence of EM-like algorithms for image segmentation using Markov random fields. Medical Image Analysis (DOI: 10.1016/j.media.2011.05.002). Parameters ---------- data : array-like Input image array mask : array-like or tuple of array Input mask to restrict the segmentation beta : float Markov regularization parameter mu : array-like Initial class-specific means sigma : array-like Initial class-specific variances """ data = data.squeeze() if len(data.shape) not in (3, 4): raise ValueError('Invalid input image') if len(data.shape) == 3: nchannels = 1 space_shape = data.shape else: nchannels = data.shape[-1] space_shape = data.shape[0:-1] self.nchannels = nchannels # Make default mask (required by MRF regularization). This will # be passed to the _ve_step C-routine, which assumes a # contiguous int array and raise an error otherwise. Voxels on # the image borders are further rejected to avoid segmentation # faults. if mask is None: mask = np.ones(space_shape, dtype=bool) X, Y, Z = np.where(mask) XYZ = np.zeros((X.shape[0], 3), dtype='intp') XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] = X, Y, Z self.XYZ = XYZ self.mask = mask self.data = data[mask] if nchannels == 1: self.data = np.reshape(self.data, (self.data.shape[0], 1)) # By default, the ppm is initialized as a collection of # uniform distributions if ppm is None: nclasses = len(mu) self.ppm = np.zeros(list(space_shape) + [nclasses]) self.ppm[mask] = 1. 
/ nclasses self.is_ppm = False self.mu = np.array(mu, dtype='double').reshape(\ (nclasses, nchannels)) self.sigma = np.array(sigma, dtype='double').reshape(\ (nclasses, nchannels, nchannels)) elif mu is None: nclasses = ppm.shape[-1] self.ppm = np.asarray(ppm) self.is_ppm = True self.mu = np.zeros((nclasses, nchannels)) self.sigma = np.zeros((nclasses, nchannels, nchannels)) else: raise ValueError('missing information') self.nclasses = nclasses if prior is not None: self.prior = np.asarray(prior)[self.mask].reshape(\ [self.data.shape[0], nclasses]) else: self.prior = None self.ngb_size = int(ngb_size) self.set_markov_prior(beta, U=U) def set_markov_prior(self, beta, U=None): if U is not None: # make sure it's C-contiguous self.U = np.asarray(U).copy() else: # Potts model U = np.ones((self.nclasses, self.nclasses)) U[_diag_indices(self.nclasses)] = 0 self.U = U self.beta = float(beta) def vm_step(self, freeze=()): classes = list(range(self.nclasses)) for i in freeze: classes.remove(i) for i in classes: P = self.ppm[..., i][self.mask].ravel() Z = nonzero(P.sum()) tmp = self.data.T * P.T mu = tmp.sum(1) / Z mu_ = mu.reshape((len(mu), 1)) sigma = np.dot(tmp, self.data) / Z - np.dot(mu_, mu_.T) self.mu[i] = mu self.sigma[i] = sigma def log_external_field(self): """ Compute the logarithm of the external field, where the external field is defined as the likelihood times the first-order component of the prior. """ lef = np.zeros([self.data.shape[0], self.nclasses]) for i in range(self.nclasses): centered_data = self.data - self.mu[i] if self.nchannels == 1: inv_sigma = 1. / nonzero(self.sigma[i]) norm_factor = np.sqrt(inv_sigma.squeeze()) else: inv_sigma = np.linalg.inv(self.sigma[i]) norm_factor = 1. / np.sqrt(\ nonzero(np.linalg.det(self.sigma[i]))) maha_dist = np.sum(centered_data * np.dot(inv_sigma, centered_data.T).T, 1) lef[:, i] = -.5 * maha_dist lef[:, i] += log(norm_factor) if self.prior is not None: lef += log(self.prior) return lef def normalized_external_field(self): f = self.log_external_field().T f -= np.max(f, 0) np.exp(f, f) f /= f.sum(0) return f.T def ve_step(self): nef = self.normalized_external_field() if self.beta == 0: self.ppm[self.mask] = np.reshape(\ nef, self.ppm[self.mask].shape) else: self.ppm = _ve_step(self.ppm, nef, self.XYZ, self.U, self.ngb_size, self.beta) def run(self, niters=NITERS, freeze=()): if self.is_ppm: self.vm_step(freeze=freeze) for i in range(niters): self.ve_step() self.vm_step(freeze=freeze) self.is_ppm = True def map(self): """ Return the maximum a posterior label map """ return map_from_ppm(self.ppm, self.mask) def free_energy(self, ppm=None): """ Compute the free energy defined as: F(q, theta) = int q(x) log q(x)/p(x,y/theta) dx associated with input parameters mu, sigma and beta (up to an ignored constant). """ if ppm is None: ppm = self.ppm q = ppm[self.mask] # Entropy term lef = self.log_external_field() f1 = np.sum(q * (log(q) - lef)) # Interaction term if self.beta > 0.0: f2 = self.beta * _interaction_energy(ppm, self.XYZ, self.U, self.ngb_size) else: f2 = 0.0 return f1 + f2 def _diag_indices(n, ndim=2): # diag_indices function present in numpy 1.4 and later. This for # compatibility with numpy < 1.4 idx = np.arange(n) return (idx,) * ndim def moment_matching(dat, mu, sigma, glob_mu, glob_sigma): """ Moment matching strategy for parameter initialization to feed a segmentation algorithm. Parameters ---------- data: array Image data. 
mu : array Template class-specific intensity means sigma : array Template class-specific intensity variances glob_mu : float Template global intensity mean glob_sigma : float Template global intensity variance Returns ------- dat_mu: array Guess of class-specific intensity means dat_sigma: array Guess of class-specific intensity variances """ dat_glob_mu = float(np.mean(dat)) dat_glob_sigma = float(np.var(dat)) a = np.sqrt(dat_glob_sigma / glob_sigma) b = dat_glob_mu - a * glob_mu dat_mu = a * mu + b dat_sigma = (a ** 2) * sigma return dat_mu, dat_sigma def map_from_ppm(ppm, mask=None): x = np.zeros(ppm.shape[0:-1], dtype='uint8') if mask is None: mask = ppm == 0 x[mask] = ppm[mask].argmax(-1) + 1 return x def binarize_ppm(q): """ Assume input ppm is masked (ndim==2) """ bin_q = np.zeros(q.shape) bin_q[:q.shape[0], np.argmax(q, axis=1)] = 1 return bin_q nipy-0.6.1/nipy/algorithms/segmentation/tests/000077500000000000000000000000001470056100100214565ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/segmentation/tests/__init__.py000066400000000000000000000000501470056100100235620ustar00rootroot00000000000000# Init to make test directory a package nipy-0.6.1/nipy/algorithms/segmentation/tests/test_segmentation.py000066400000000000000000000076121470056100100255720ustar00rootroot00000000000000""" Testing brain segmentation module """ from numbers import Number import numpy as np from numpy.testing import assert_almost_equal, assert_array_almost_equal from ....io.files import load as load_image from ....testing import anatfile from ..brain_segmentation import BrainT1Segmentation from ..segmentation import Segmentation anat_img = load_image(anatfile) anat_mask = anat_img.get_fdata() > 0 DIMS = (30, 30, 20) def _check_dims(x, ndim, shape): if isinstance(shape, Number): shape = (shape,) for i in range(ndim): assert x.shape[i] == shape[i] def _test_brain_seg(model, niters=3, beta=0, ngb_size=6, init_params=None, convert=True): S = BrainT1Segmentation(anat_img.get_fdata(), mask=anat_mask, model=model, niters=niters, beta=beta, ngb_size=ngb_size, init_params=init_params, convert=convert) shape = anat_img.shape if convert: nclasses = 3 else: nclasses = S.mixmat.shape[0] # Check that the class attributes have appropriate dimensions _check_dims(S.ppm, 4, list(shape) + [nclasses]) _check_dims(S.label, 3, shape) _check_dims(S.mu, 1, S.mixmat.shape[0]) _check_dims(S.sigma, 1, S.mixmat.shape[0]) # Check that probabilities are zero outside the mask and sum up to # one inside the mask assert_almost_equal(S.ppm[~S.mask].sum(-1).max(), 0) assert_almost_equal(S.ppm[S.mask].sum(-1).min(), 1) # Check that labels are zero outside the mask and > 1 inside the # mask assert_almost_equal(S.label[~S.mask].max(), 0) assert_almost_equal(S.label[S.mask].min(), 1) def test_brain_seg1(): _test_brain_seg('3k', niters=3, beta=0.0, ngb_size=6) def test_brain_seg2(): _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6) def test_brain_seg3(): _test_brain_seg('4k', niters=3, beta=0.5, ngb_size=6) def test_brain_seg4(): _test_brain_seg('4k', niters=3, beta=0.5, ngb_size=26) def test_brain_seg5(): _test_brain_seg(np.array([[1., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]), niters=3, beta=0.5, ngb_size=6) def test_brain_seg6(): _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6, convert=False) def test_brain_seg7(): mu = np.array([0, 50, 100]) sigma = np.array([1000, 2000, 3000]) _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6, init_params=(mu, sigma)) def _test_segmentation(S, nchannels=1): assert 
S.nchannels == nchannels nef = S.normalized_external_field() assert_array_almost_equal(nef.sum(-1), np.ones(nef.shape[0])) S.run(niters=5) label = S.map() assert label.ndim == 3 assert label.dtype == 'uint8' assert isinstance(S.free_energy(), float) def test_segmentation_3d(): data = np.random.random(DIMS) _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1])) def test_segmentation_3d_with_MRF(): data = np.random.random(DIMS) _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1], beta=.2)) def test_segmentation_3d_with_mask(): data = np.random.random(DIMS) mask = data > .1 if mask[0].size < 1: return _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1], mask=mask)) def test_segmentation_3d_multichannel(): data = np.random.random(list(DIMS) + [2]) mask = data[..., 0] > .1 if mask[0].size < 1: return _test_segmentation(Segmentation(data, mu=[[0.25, 0.25], [0.75, 0.75]], sigma=[np.eye(2), np.eye(2)], mask=mask), nchannels=2) nipy-0.6.1/nipy/algorithms/slicetiming/000077500000000000000000000000001470056100100201265ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/slicetiming/__init__.py000066400000000000000000000004461470056100100222430ustar00rootroot00000000000000# Init for slicetiming subpackage """ Slicetiming subpackage The standard nipy method of slice timing is implemented in :mod:`nipy.algorithms.registration.groupwise_registration`. This subpackage is a placeholder for other slice timing methods, and for utility functions for slice timing """ nipy-0.6.1/nipy/algorithms/slicetiming/tests/000077500000000000000000000000001470056100100212705ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/slicetiming/tests/__init__.py000066400000000000000000000000351470056100100233770ustar00rootroot00000000000000# Init for slicetiming tests nipy-0.6.1/nipy/algorithms/slicetiming/tests/test_timefuncs.py000066400000000000000000000075671470056100100247150ustar00rootroot00000000000000""" Testing timefuncs module """ import numpy as np from numpy.testing import assert_almost_equal, assert_array_equal from .. import timefuncs as tf def test_ascending(): tr = 2. for func in (tf.st_01234, tf.ascending): for n_slices in (10, 11): assert_almost_equal( func(n_slices, tr), np.arange(n_slices) / n_slices * tr) assert_array_equal( np.argsort(func(5, 1)), [0, 1, 2, 3, 4]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_descending(): tr = 2. for func in (tf.st_43210, tf.descending): for n_slices in (10, 11): assert_almost_equal( func(n_slices, tr), np.arange(n_slices-1, -1, -1) / n_slices * tr) assert_array_equal( np.argsort(func(5, 1)), [4, 3, 2, 1, 0]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_asc_alt_2(): tr = 2. for func in (tf.st_02413, tf.asc_alt_2): assert_almost_equal( func(10, tr) / tr * 10, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]) assert_almost_equal( func(11, tr) / tr * 11, [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5]) assert_array_equal( np.argsort(func(5, 1)), [0, 2, 4, 1, 3]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_desc_alt_2(): tr = 2. for func in (tf.st_42031, tf.desc_alt_2): assert_almost_equal( func(10, tr) / tr * 10, [9, 4, 8, 3, 7, 2, 6, 1, 5, 0]) assert_almost_equal( func(11, tr) / tr * 11, [5, 10, 4, 9, 3, 8, 2, 7, 1, 6, 0]) assert_array_equal( np.argsort(func(5, 1)), [4, 2, 0, 3, 1]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_asc_alt_2_1(): tr = 2. 
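    # st_13024 (alias asc_alt_2_1) ascends alternately by two slices,
    # starting at slice 1 -- see its docstring in timefuncs.py below.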
for func in (tf.st_13024, tf.asc_alt_2_1): assert_almost_equal( func(10, tr) / tr * 10, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]) assert_almost_equal( func(11, tr) / tr * 11, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4, 10]) assert_array_equal( np.argsort(func(5, 1)), [1, 3, 0, 2, 4]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_asc_alt_siemens(): tr = 2. for func in (tf.st_odd0_even1, tf.asc_alt_siemens): assert_almost_equal( func(10, tr) / tr * 10, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]) assert_almost_equal( func(11, tr) / tr * 11, [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5]) assert_array_equal( np.argsort(func(5, 1)), [0, 2, 4, 1, 3]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_asc_alt_half(): tr = 2. for func in (tf.st_03142, tf.asc_alt_half): assert_almost_equal( func(10, tr) / tr * 10, [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]) assert_almost_equal( func(11, tr) / tr * 11, [0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9]) assert_array_equal( np.argsort(func(5, 1)), [0, 3, 1, 4, 2]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_desc_alt_half(): tr = 2. for func in (tf.st_41302, tf.desc_alt_half): assert_almost_equal( func(10, tr) / tr * 10, [9, 7, 5, 3, 1, 8, 6, 4, 2, 0]) assert_almost_equal( func(11, tr) / tr * 11, [9, 7, 5, 3, 1, 10, 8, 6, 4, 2, 0]) assert_array_equal( np.argsort(func(5, 1)), [4, 1, 3, 0, 2]) assert tf.SLICETIME_FUNCTIONS[func.__name__] == func def test_number_names(): for func in ( tf.st_01234, tf.st_43210, tf.st_02413, tf.st_42031, tf.st_13024, tf.st_03142, tf.st_41302): name = func.__name__ assert tf.SLICETIME_FUNCTIONS[name] == func assert tf.SLICETIME_FUNCTIONS[name[3:]] == func nipy-0.6.1/nipy/algorithms/slicetiming/timefuncs.py000066400000000000000000000160421470056100100225000ustar00rootroot00000000000000""" Utility functions for returning slice times from number of slices and TR Slice timing routines in nipy need a vector of slice times. Slice times are vectors $t_i$ with $i = 0 ... N$ of times, one for each slice, where $t_i$ gives the time at which slice number $i$ was acquired, relative to the beginning of the volume acquisition. We like these vectors because they are unambiguous; the indices $i$ refer to positions in space, and the values $t_i$ refer to times. But, there are many common slice timing regimes for which it's easy to get the slice times once you know the volume acquisition time (the TR) and the number of slices. For example, if you acquired the slices in a simple ascending order, and you have 10 slices and the TR was 2.0, then the slice times are: >>> import numpy as np >>> np.arange(10) / 10. * 2.0 array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8]) These are small convenience functions that accept the number of slices and the TR as input, and return a vector of slice times: >>> ascending(10, 2.) array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8]) """ import numpy as np # Legacy repr printing from numpy. # Dictionary (key, value) == (name, function) for slice timing functions SLICETIME_FUNCTIONS = {} def _dec_filldoc(func): """ Fill docstring of slice time function """ func._doc_template = func.__doc__ func.__doc__ = func.__doc__.format( name=func.__name__, pstr="""Note: slice 0 is the first slice in the voxel data block Parameters ---------- n_slices : int Number of slices in volume TR : float Time to acquire one full volume Returns ------- slice_times : (n_slices,) ndarray Vectors $t_i i = 0 ... 
N$ of times, one for each slice, where $t_i$ gives the time at which slice number $i$ was acquired, relative to the beginning of the volume acquisition. """) return func def _dec_register_stf(func): """ Register slice time function in module dictionary """ name = func.__name__ SLICETIME_FUNCTIONS[name] = func if name.startswith('st_'): short_name = name[3:] if short_name in SLICETIME_FUNCTIONS: raise ValueError( f"Duplicate short / long function name {short_name}") SLICETIME_FUNCTIONS[short_name] = func return func def _dec_stfunc(func): return _dec_register_stf(_dec_filldoc(func)) def _derived_func(name, func): def derived(n_slices, TR): return func(n_slices, TR) derived.__name__ = name derived.__doc__ = func._doc_template return _dec_stfunc(derived) @_dec_stfunc def st_01234(n_slices, TR): """ Simple ascending slice sequence slice 0 first, slice 1 second etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0. , 0.2, 0.4, 0.6, 0.8]) {pstr} """ return np.arange(n_slices) / n_slices * TR ascending = _derived_func('ascending', st_01234) @_dec_stfunc def st_43210(n_slices, TR): """ Simple descending slice sequence slice ``n_slices-1`` first, slice ``n_slices - 2`` second etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0.8, 0.6, 0.4, 0.2, 0. ]) {pstr} """ return np.arange(n_slices)[::-1] / n_slices * TR descending = _derived_func('descending', st_43210) @_dec_stfunc def st_02413(n_slices, TR): """Ascend alternate every second slice, starting at first slice Collect slice 0 first, slice 2 second up to top. Then return to collect slice 1, slice 3 etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0. , 0.6, 0.2, 0.8, 0.4]) {pstr} """ one_slice = TR / n_slices time_to_space = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2)) space_to_time = np.argsort(time_to_space) return space_to_time * one_slice asc_alt_2 = _derived_func('asc_alt_2', st_02413) @_dec_stfunc def st_13024(n_slices, TR): """Ascend alternate every second slice, starting at second slice Collect slice 1 first, slice 3 second up to top (highest numbered slice). Then return to collect slice 0, slice 2 etc. This order is rare except on Siemens acquisitions with an even number of slices. See :func:`st_odd0_even1` for this logic. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0.4, 0. , 0.6, 0.2, 0.8]) {pstr} """ one_slice = TR / n_slices time_to_space = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2)) space_to_time = np.argsort(time_to_space) return space_to_time * one_slice asc_alt_2_1 = _derived_func('asc_alt_2_1', st_13024) @_dec_stfunc def st_42031(n_slices, TR): """Descend alternate every second slice, starting at last slice Collect slice (`n_slices` - 1) first, slice (`nslices` - 3) second down to bottom (lowest numbered slice). Then return to collect slice (`n_slices` -2), slice (`n_slices` - 4) etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0.4, 0.8, 0.2, 0.6, 0. ]) {pstr} """ return st_02413(n_slices, TR)[::-1] desc_alt_2 = _derived_func('desc_alt_2', st_42031) @_dec_stfunc def st_odd0_even1(n_slices, TR): """Ascend alternate starting at slice 0 for odd, slice 1 for even `n_slices` Acquisitions with alternating ascending slices from Siemens scanners often seem to have this behavior as default - see: https://mri.radiology.uiowa.edu/fmri_images.html This means we use the :func:`st_02413` algorithm if `n_slices` is odd, and the :func:`st_13024` algorithm if `n_slices` is even. 
For example, for 4 slices and a TR of 1: >>> {name}(4, 1.) array([ 0.5 , 0. , 0.75, 0.25]) 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0. , 0.6, 0.2, 0.8, 0.4]) {pstr} """ if n_slices % 2 == 0: return st_13024(n_slices, TR) return st_02413(n_slices, TR) asc_alt_siemens = _derived_func('asc_alt_siemens', st_odd0_even1) @_dec_stfunc def st_03142(n_slices, TR): """Ascend alternate, where alternation is by half the volume Collect slice 0 then slice ``ceil(n_slices / 2.)`` then slice 1 then slice ``ceil(nslices / 2.) + 1`` etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0. , 0.4, 0.8, 0.2, 0.6]) {pstr} """ one_slice = TR / n_slices space_to_time = (list(range(0, n_slices, 2)) + list(range(1, n_slices, 2))) return np.array(space_to_time) * one_slice asc_alt_half = _derived_func('asc_alt_half', st_03142) @_dec_stfunc def st_41302(n_slices, TR): """Descend alternate, where alternation is by half the volume Collect slice ``(n_slices - 1)`` then slice ``floor(nslices / 2.) - 1`` then slice ``(n_slices - 2)`` then slice ``floor(nslices / 2.) - 2`` etc. For example, for 5 slices and a TR of 1: >>> {name}(5, 1.) array([ 0.6, 0.2, 0.8, 0.4, 0. ]) {pstr} """ return st_03142(n_slices, TR)[::-1] desc_alt_half = _derived_func('desc_alt_half', st_41302) nipy-0.6.1/nipy/algorithms/statistics/000077500000000000000000000000001470056100100200115ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/__init__.py000066400000000000000000000004501470056100100221210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ TODO """ __docformat__ = 'restructuredtext' from . import formula, intvol, onesample, rft from ._quantile import _median as median from ._quantile import _quantile as quantile nipy-0.6.1/nipy/algorithms/statistics/_quantile.pyx000066400000000000000000000053741470056100100225450ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Very fast quantile computation using partial sorting. Author: Alexis Roche. """ __version__ = '0.1' import numpy as np cimport numpy as np cdef extern from "quantile.h": double quantile(double* data, np.npy_intp size, np.npy_intp stride, double r, int interp) np.import_array() # This is faster than scipy.stats.scoreatpercentile owing to partial # sorting def _quantile(X, double ratio, int interp=False, int axis=0): """ Fast quantile computation using partial sorting. This function has similar behavior to `scipy.percentile` but runs significantly faster for large arrays. Parameters ---------- X : array Input array. Will be internally converted into an array of doubles if needed. ratio : float A value in range [0, 1] defining the desired quantiles (the higher the ratio, the higher the quantiles). interp : boolean Determine whether quantiles are interpolated. axis : int Axis along which quantiles are computed. 
Output ------ Y : array Array of quantiles """ cdef double *x cdef double *y cdef long int size, stride cdef np.flatiter itX, itY # Convert the input array to double if needed X = np.asarray(X, dtype='double') # Check the input ratio is in range (0,1) if ratio < 0 or ratio > 1: raise ValueError('ratio must be in range 0..1') # Allocate output array Y dims = list(X.shape) dims[axis] = 1 Y = np.zeros(dims) # Set size and stride along specified axis size = X.shape[axis] stride = X.strides[axis] / sizeof(double) # Create array iterators itX = np.PyArray_IterAllButAxis(X, &axis) itY = np.PyArray_IterAllButAxis(Y, &axis) # Loop while np.PyArray_ITER_NOTDONE(itX): x = np.PyArray_ITER_DATA(itX) y = np.PyArray_ITER_DATA(itY) y[0] = quantile(x, size, stride, ratio, interp) np.PyArray_ITER_NEXT(itX) np.PyArray_ITER_NEXT(itY) return Y # This is faster than numpy.stats # due to the underlying algorithm that relies on # partial sorting as opposed to full sorting. def _median(X, axis=0): """ Fast median computation using partial sorting. This function is similar to `numpy.median` but runs significantly faster for large arrays. Parameters ---------- X : array Input array. Will be internally converted into an array of doubles if needed. axis : int Axis along which medians are computed. Output ------ Y : array Array of medians """ return _quantile(X, axis=axis, ratio=0.5, interp=True) nipy-0.6.1/nipy/algorithms/statistics/api.py000066400000000000000000000010051470056100100211300ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Pseudo-package for some important statistics symbols For example: >>> from nipy.algorithms.statistics.api import Formula """ from .formula import formulae from .formula.formulae import ( Factor, Formula, Term, make_recarray, natural_spline, terms, ) from .models import family, glm, model, regression from .models.regression import ARModel, OLSModel, WLSModel, isestimable nipy-0.6.1/nipy/algorithms/statistics/bayesian_mixed_effects.py000066400000000000000000000045021470056100100250440ustar00rootroot00000000000000""" Generic implementation of multiple regression analysis under noisy measurements. """ import numpy as np nonzero = lambda x: np.maximum(x, 1e-25) def two_level_glm(y, vy, X, niter=10): """ Inference of a mixed-effect linear model using the variational Bayes algorithm. Parameters ---------- y : array-like Array of observations. Shape should be (n, ...) where n is the number of independent observations per unit. vy : array-like First-level variances associated with the observations. Should be of the same shape as Y. X : array-like Second-level design matrix. Shape should be (n, p) where n is the number of observations per unit, and p is the number of regressors. Returns ------- beta : array-like Effect estimates (posterior means) s2 : array-like Variance estimates. 
The posterior variance matrix of beta[:, i] may be computed by s2[:, i] * inv(X.T * X) dof : float Degrees of freedom as per the variational Bayes approximation (simply, the number of observations minus the number of independent regressors) """ # Number of observations, regressors and points nobs = X.shape[0] if X.ndim == 1: nreg = 1 else: nreg = X.shape[1] if nobs <= nreg: raise ValueError('Too many regressors compared to data size') if y.ndim == 1: npts = 1 else: npts = np.prod(y.shape[1:]) # Reshape input arrays X = X.reshape((nobs, nreg)) y = np.reshape(y, (nobs, npts)) vy = nonzero(np.reshape(vy, (nobs, npts))) # Degrees of freedom dof = float(nobs - nreg) # Compute the pseudo-inverse matrix pinvX = np.linalg.pinv(X) # Initialize outputs b = np.zeros((nreg, npts)) zfit = np.zeros((nobs, npts)) s2 = np.inf # VB loop for it in range(niter): # Update distribution of "true" effects w1 = 1 / vy w2 = 1 / nonzero(s2) vz = 1 / (w1 + w2) z = vz * (w1 * y + w2 * zfit) # Update distribution of population parameters b = np.dot(pinvX, z) zfit = np.dot(X, b) s2 = np.sum((z - zfit) ** 2 + vz, 0) / dof # Output arrays B = np.reshape(b, [nreg] + list(y.shape[1:])) S2 = np.reshape(s2, list(y.shape[1:])) return B, S2, dof nipy-0.6.1/nipy/algorithms/statistics/bench/000077500000000000000000000000001470056100100210705ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/bench/__init__.py000066400000000000000000000000451470056100100232000ustar00rootroot00000000000000# Init for benchmarks for algorithms nipy-0.6.1/nipy/algorithms/statistics/bench/bench_intvol.py000066400000000000000000000060351470056100100241200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import sys import numpy as np import numpy.testing as npt from .. 
import intvol from ..tests.test_intrinsic_volumes import nonintersecting_boxes, randorth def bench_lips3d(): np.random.seed(20111001) phi = intvol.Lips3d EC3d = intvol.EC3d repeat = 4 bx_sz = 60 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)*3) c = np.indices(box1.shape).astype(np.float64) sys.stdout.flush() print("\nIntrinsic volumes 3D") print("--------------------") print(f"Box1 {npt.measure('phi(c,box1)', repeat):6.2f}") print(f"Box2 {npt.measure('phi(c, box2)', repeat):6.2f}") print(f"Box1+2 {npt.measure('phi(c, box1 + box2)', repeat):6.2f}") d = np.random.standard_normal((10,) + (bx_sz,) * 3) print(f"Box1+2 d {npt.measure('phi(d, box1 + box2)', repeat):6.2f}") U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print(f"Box1+2 e {npt.measure('phi(e, box1 + box2)', repeat):6.2f}") print(f"Box1+2 EC {npt.measure('EC3d(box1 + box2)', repeat):6.2f}") sys.stdout.flush() def bench_lips2d(): np.random.seed(20111001) phi = intvol.Lips2d EC2d = intvol.EC2d repeat = 4 bx_sz = 500 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)*2) c = np.indices(box1.shape).astype(np.float64) sys.stdout.flush() print("\nIntrinsic volumes 2D") print("--------------------") print(f"Box1 {npt.measure('phi(c,box1)', repeat):6.2f}") print(f"Box2 {npt.measure('phi(c, box2)', repeat):6.2f}") print(f"Box1+2 {npt.measure('phi(c, box1 + box2)', repeat):6.2f}") d = np.random.standard_normal((10,) + (bx_sz,) * 2) print(f"Box1+2 d {npt.measure('phi(d, box1 + box2)', repeat):6.2f}") U = randorth(p=6)[0:2] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print(f"Box1+2 e {npt.measure('phi(e, box1 + box2)', repeat):6.2f}") print(f"Box1+2 EC {npt.measure('EC2d(box1 + box2)', repeat):6.2f}") sys.stdout.flush() def bench_lips1d(): np.random.seed(20111001) phi = intvol.Lips1d EC1d = intvol.EC1d repeat = 4 bx_sz = 100000 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)) c = np.indices(box1.shape).astype(np.float64) sys.stdout.flush() print("\nIntrinsic volumes 1D") print("--------------------") print(f"Box1 {npt.measure('phi(c,box1)', repeat):6.2f}") print(f"Box2 {npt.measure('phi(c, box2)', repeat):6.2f}") print(f"Box1+2 {npt.measure('phi(c, box1 + box2)', repeat):6.2f}") d = np.random.standard_normal((10, bx_sz)) print(f"Box1+2 d {npt.measure('phi(d, box1 + box2)', repeat):6.2f}") U = randorth(p=6)[0:1] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print(f"Box1+2 e {npt.measure('phi(e, box1 + box2)', repeat):6.2f}") print(f"Box1+2 EC {npt.measure('EC1d(box1 + box2)', repeat):6.2f}") sys.stdout.flush() nipy-0.6.1/nipy/algorithms/statistics/empirical_pvalue.py000066400000000000000000000452021470056100100237070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Routines to get corrected p-values estimates, based on the observations. It implements 3 approaches: - Benjamini-Hochberg FDR: http://en.wikipedia.org/wiki/False_discovery_rate - a class that fits a Gaussian model to the central part of an histogram, following [1] [1] Schwartzman A, Dougherty RF, Lee J, Ghahremani D, Taylor JE. Empirical null and false discovery rate analysis in neuroimaging. Neuroimage. 2009 Jan 1;44(1):71-82. PubMed PMID: 18547821. DOI: 10.1016/j.neuroimage.2008.04.182 This is typically necessary to estimate a FDR when one is not certain that the data behaves as a standard normal under H_0. 
- a model based on Gaussian mixture modelling 'a la Oxford' Author : Bertrand Thirion, Yaroslav Halchenko, 2008-2012 """ import numpy as np import scipy.stats as st from numpy.linalg import pinv def check_p_values(p_values): """Basic checks on the p_values array: values should be within [0,1] Assures also that p_values are at least in 1d array. None of the checks is performed if p_values is None. Parameters ---------- p_values : array of shape (n) The sample p-values Returns ------- p_values : array of shape (n) The sample p-values """ if p_values is None: return None # Take all elements unfolded and assure having at least 1d p_values = np.atleast_1d(np.ravel(p_values)) if np.any(np.isnan(p_values)): raise ValueError("%d values are NaN" % (sum(np.isnan(p_values)))) if p_values.min() < 0: raise ValueError(f"Negative p-values. Min={p_values.min():g}") if p_values.max() > 1: raise ValueError(f"P-values greater than 1! Max={p_values.max():g}") return p_values def gaussian_fdr(x): """Return the FDR associated with each value assuming a Gaussian distribution """ return fdr(st.norm.sf(np.squeeze(x))) def gaussian_fdr_threshold(x, alpha=0.05): """Return FDR threshold given normal variates Given an array x of normal variates, this function returns the critical p-value associated with alpha. x is explicitly assumed to be normal distributed under H_0 Parameters ----------- x: ndarray input data alpha: float, optional desired significance Returns ------- threshold : float threshold, given as a Gaussian critical value """ pvals = st.norm.sf(x) pth = fdr_threshold(pvals, alpha) return st.norm.isf(pth) def fdr_threshold(p_values, alpha=0.05): """Return FDR threshold given p values Parameters ----------- p_values : array of shape (n), optional The samples p-value alpha : float, optional The desired FDR significance Returns ------- critical_p_value: float The p value corresponding to the FDR alpha """ p_values = check_p_values(p_values) n_samples = np.size(p_values) p_corr = alpha / n_samples sp_values = np.sort(p_values) critical_set = sp_values[ sp_values < p_corr * np.arange(1, n_samples + 1)] if len(critical_set) > 0: critical_p_value = critical_set.max() else: critical_p_value = p_corr return critical_p_value def fdr(p_values=None, verbose=0): """Returns the FDR associated with each p value Parameters ----------- p_values : ndarray of shape (n) The samples p-value Returns ------- q : array of shape(n) The corresponding fdr values """ p_values = check_p_values(p_values) n_samples = p_values.size order = p_values.argsort() sp_values = p_values[order] # compute q while in ascending order q = np.minimum(1, n_samples * sp_values / np.arange(1, n_samples + 1)) for i in range(n_samples - 1, 0, - 1): q[i - 1] = min(q[i], q[i - 1]) # reorder the results inverse_order = np.arange(n_samples) inverse_order[order] = np.arange(n_samples) q = q[inverse_order] if verbose: import matplotlib.pyplot as plt plt.figure() plt.xlabel('Input p-value') plt.plot(p_values, q, '.') plt.ylabel('Associated fdr') return q class NormalEmpiricalNull: """Class to compute the empirical null normal fit to the data. The data which is used to estimate the FDR, assuming a Gaussian null from Schwartzmann et al., NeuroImage 44 (2009) 71--82 """ def __init__(self, x): """Initialize an empirical null normal object. Parameters ----------- x : 1D ndarray The data used to estimate the empirical null. 
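        For example, a minimal sketch on simulated null data (the values
        below are illustrative, not prescriptive):

        >>> import numpy as np
        >>> np.random.seed(0)
        >>> enn = NormalEmpiricalNull(np.random.standard_normal(10000))
        >>> enn.learn()  # fit enn.mu, enn.sigma, enn.p0 on central data
        >>> float(abs(enn.mu)) < 0.5  # close to the true null mean, 0
        True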
""" x = np.reshape(x, (- 1)) self.x = np.sort(x) self.n = np.size(x) self.learned = 0 def learn(self, left=0.2, right=0.8): """ Estimate the proportion, mean and variance of a Gaussian distribution for a fraction of the data Parameters ---------- left: float, optional Left cut parameter to prevent fitting non-gaussian data right: float, optional Right cut parameter to prevent fitting non-gaussian data Notes ----- This method stores the following attributes: * mu = mu * p0 = min(1, np.exp(lp0)) * sqsigma: variance of the estimated normal distribution * sigma: np.sqrt(sqsigma) : standard deviation of the estimated normal distribution """ # take a central subsample of x x = self.x[int(self.n * left): int(self.n * right)] # generate the histogram step = 3.5 * np.std(self.x) / np.exp(np.log(self.n) / 3) bins = max(10, int((self.x.max() - self.x.min()) // step)) hist, ledge = np.histogram(x, bins=bins) step = ledge[1] - ledge[0] medge = ledge + 0.5 * step # remove null bins hist = hist[hist > 0].astype(np.float64) medge = medge[:-1][hist > 0] # edges include rightmost outer # fit the histogram dmtx = np.ones((3, len(hist))) dmtx[1] = medge dmtx[2] = medge ** 2 coef = np.dot(np.log(hist), pinv(dmtx)) sqsigma = - 1.0 / (2 * coef[2]) sqsigma = max(sqsigma, 1.e-6) mu = coef[1] * sqsigma lp0 = (coef[0] - np.log(step * self.n) + 0.5 * np.log(2 * np.pi * sqsigma) + mu ** 2 / (2 * sqsigma)) self.mu = mu self.p0 = min(1, np.exp(lp0)) self.sigma = np.sqrt(sqsigma) self.sqsigma = sqsigma def fdrcurve(self): """ Returns the FDR associated with any point of self.x """ import scipy.stats as st if self.learned == 0: self.learn() efp = (self.p0 * st.norm.sf(self.x, self.mu, self.sigma) * self.n / np.arange(self.n, 0, - 1)) efp = np.minimum(efp, 1) ix = np.argsort(self.x) for i in range(np.size(efp) - 1, 0, - 1): efp[ix[i - 1]] = np.maximum(efp[ix[i]], efp[ix[i - 1]]) self.sorted_x = self.x[ix] self.sorted_fdr = efp[ix] return efp def threshold(self, alpha=0.05, verbose=0): """ Compute the threshold corresponding to an alpha-level FDR for x Parameters ----------- alpha : float, optional the chosen false discovery rate threshold. verbose : boolean, optional the verbosity level, if True a plot is generated. Returns ------- theta: float the critical value associated with the provided FDR """ efp = self.fdrcurve() if verbose: self.plot(efp, alpha) if efp[-1] > alpha: print(f"the maximal value is {self.x[-1]:f} , the corresponding FDR is {efp[-1]:f} ") return np.inf j = np.argmin(efp[::-1] < alpha) + 1 return 0.5 * (self.x[-j] + self.x[-j + 1]) def uncorrected_threshold(self, alpha=0.001, verbose=0): """Compute the threshold corresponding to a specificity alpha for x Parameters ----------- alpha : float, optional the chosen false discovery rate (FDR) threshold. verbose : boolean, optional the verbosity level, if True a plot is generated. 
Returns ------- theta: float the critical value associated with the provided p-value """ if self.learned == 0: self.learn() threshold = st.norm.isf(alpha, self.mu, self.sigma) if not np.isfinite(threshold): threshold = np.inf if verbose: self.plot() return threshold def fdr(self, theta): """Given a threshold theta, find the estimated FDR Parameters ---------- theta : float or array of shape (n_samples) values to test Returns ------- afp : value of array of shape(n) """ from scipy.stats import norm self.fdrcurve() if np.isscalar(theta): if theta > self.sorted_x[ - 1]: return 0 maj = np.where(self.sorted_x >= theta)[0][0] efp = (self.p0 * norm.sf(theta, self.mu, self.sigma) * self.n\ / np.sum(self.x >= theta)) efp = np.maximum(self.sorted_fdr[maj], efp) else: efp = [] for th in theta: if th > self.sorted_x[ - 1]: efp.append(0) continue maj = self.sorted_fdr[np.where(self.sorted_x >= th)[0][0]] efp.append(np.maximum(maj, self.p0 * st.norm.sf(th, self.mu, self.sigma) * self.n / np.sum(self.x >= th))) efp = np.array(efp) # efp = np.minimum(efp, 1) return efp def plot(self, efp=None, alpha=0.05, bar=1, mpaxes=None): """Plot the histogram of x Parameters ------------ efp : float, optional The empirical FDR (corresponding to x) if efp==None, the false positive rate threshold plot is not drawn. alpha : float, optional The chosen FDR threshold bar=1 : bool, optional mpaxes=None: if not None, handle to an axes where the fig will be drawn. Avoids creating unnecessarily new figures """ if not self.learned: self.learn() n = np.size(self.x) bins = max(10, int(2 * np.exp(np.log(n) / 3.))) hist, ledge = np.histogram(self.x, bins=bins) hist = hist.astype('f') / hist.sum() step = ledge[1] - ledge[0] medge = ledge + 0.5 * step import scipy.stats as st g = self.p0 * st.norm.pdf(medge, self.mu, self.sigma) hist /= step import matplotlib.pyplot as plt if mpaxes is None: plt.figure() ax = plt.subplot(1, 1, 1) else: ax = mpaxes if bar: # We need to cut ledge to len(hist) to accommodate for pre and # post numpy 1.3 hist semantic change. ax.bar(ledge[:len(hist)], hist, step) else: ax.plot(medge[:len(hist)], hist, linewidth=2) ax.plot(medge, g, 'r', linewidth=2) ax.set_title('Robust fit of the histogram', fontsize=12) l = ax.legend(('empirical null', 'data'), loc=0) for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) if efp is not None: ax.plot(self.x, np.minimum(alpha, efp), 'k') def three_classes_GMM_fit(x, test=None, alpha=0.01, prior_strength=100, verbose=0, fixed_scale=False, mpaxes=None, bias=0, theta=0, return_estimator=False): """Fit the data with a 3-classes Gaussian Mixture Model, i.e. compute some probability that the voxels of a certain map are in class disactivated, null or active Parameters ---------- x: array of shape (nvox,1) The map to be analysed test: array of shape(nbitems,1), optional the test values for which the p-value needs to be computed by default (if None), test=x alpha: float, optional the prior weights of the positive and negative classes prior_strength: float, optional the confidence on the prior (should be compared to size(x)) verbose: int verbosity mode fixed_scale: bool, optional boolean, variance parameterization. if True, the variance is locked to 1 otherwise, it is estimated from the data mpaxes: axes handle used to plot the figure in verbose mode if None, new axes are created bias: bool allows a rescaling of the posterior probability that takes into account the threshold theta. Not rigorous. 
    theta: float
        the threshold used to correct the posterior p-values
        when bias=1; normally, it is such that test>theta
        note that if theta = -np.inf, the method has a standard behaviour
    return_estimator: boolean, optional
        If return_estimator is true, the estimator object is returned.

    Returns
    -------
    bfp : array of shape (nbitems,3):
        the posterior probability of each test item belonging to each
        component in the GMM (sum to 1 across the 3 classes)
        if np.size(test)==0, i.e. nbitem==0, None is returned
    estimator : nipy.labs.clustering.GMM object
        The estimator object, returned only if return_estimator is true.

    Notes
    -----
    Our convention is that:

    * class 1 represents the negative class
    * class 2 represents the null class
    * class 3 represents the positive class
    """
    from ..clustering.bgmm import VBGMM
    from ..clustering.gmm import GridDescriptor

    nvox = np.size(x)
    x = np.reshape(x, (nvox, 1))
    if test is None:
        test = x
    if np.size(test) == 0:
        return None

    sx = np.sort(x, 0)
    nclasses = 3

    # set the priors from a reasonable model of the data (!)
    # prior means
    mb0 = np.mean(sx[:int(alpha * nvox)])
    mb2 = np.mean(sx[int((1 - alpha) * nvox):])
    prior_means = np.reshape(np.array([mb0, 0, mb2]), (nclasses, 1))
    if fixed_scale:
        prior_scale = np.ones((nclasses, 1, 1)) * 1. / (prior_strength)
    else:
        prior_scale = (np.ones((nclasses, 1, 1)) * 1. /
                       (prior_strength * np.var(x)))
    prior_dof = np.ones(nclasses) * prior_strength
    prior_weights = np.array([alpha, 1 - 2 * alpha, alpha]) * prior_strength
    prior_shrinkage = np.ones(nclasses) * prior_strength

    # instantiate the class and set the priors
    BayesianGMM = VBGMM(nclasses, 1, prior_means, prior_scale,
                        prior_weights, prior_shrinkage, prior_dof)
    BayesianGMM.set_priors(prior_means, prior_weights, prior_scale,
                           prior_dof, prior_shrinkage)

    # estimate the model
    BayesianGMM.estimate(x, delta=1.e-8, verbose=max(0, verbose-1))

    # create a sampling grid
    if (verbose or bias):
        gd = GridDescriptor(1)
        gd.set([x.min(), x.max()], 100)
        gdm = gd.make_grid().squeeze()
        lj = BayesianGMM.likelihood(gd.make_grid())

    # estimate the prior weights
    bfp = BayesianGMM.likelihood(test)
    if bias:
        lw = np.sum(lj[gdm > theta], 0)
        weights = BayesianGMM.weights / (BayesianGMM.weights.sum())
        bfp = (lw / weights) * BayesianGMM.slikelihood(test)

    if verbose and (mpaxes is not False):
        BayesianGMM.show_components(x, gd, lj, mpaxes)

    bfp = (bfp.T / bfp.sum(1)).T
    if not return_estimator:
        return bfp
    else:
        return bfp, BayesianGMM


def gamma_gaussian_fit(x, test=None, verbose=0, mpaxes=False,
                       bias=1, gaussian_mix=0, return_estimator=False):
    """ Compute prior probabilities that the voxels of a certain map
    are in the deactivated, null or active class, using a gamma-Gaussian
    mixture

    Parameters
    ----------
    x: array of shape (nvox,)
        the map to be analysed
    test: array of shape (nbitems,), optional
        the test values for which the p-value needs to be computed
        by default, test = x
    verbose: 0, 1 or 2, optional
        verbosity mode, 0 is quiet, and 2 calls matplotlib to display
        graphs.
    mpaxes: matplotlib axes, optional
        axes handle used to plot the figure in verbose mode
        if None, new axes are created
        if False, nothing is done
    bias: float, optional
        lower bound on the Gaussian variance (to avoid shrinkage)
    gaussian_mix: float, optional
        if nonzero, lower bound on the Gaussian mixing weight
        (to avoid shrinkage)
    return_estimator: boolean, optional
        if return_estimator is true, the estimator object is returned.
Returns ------- bfp: array of shape (nbitems,3) The probability of each component in the mixture model for each test value estimator: nipy.labs.clustering.ggmixture.GGGM object The estimator object, returned only if return_estimator is true. """ from ..clustering import ggmixture Ggg = ggmixture.GGGM() Ggg.init_fdr(x) Ggg.estimate(x, niter=100, delta=1.e-8, bias=bias, verbose=0, gaussian_mix=gaussian_mix) if mpaxes is not False: # hyper-verbose mode Ggg.show(x, mpaxes=mpaxes) Ggg.parameters() if test is None: test = x test = np.reshape(test, np.size(test)) bfp = np.array(Ggg.posterior(test)).T if return_estimator: return bfp, Ggg return bfp def smoothed_histogram_from_samples(x, bins=None, nbins=256, normalized=False): """ Smooth histogram corresponding to density underlying the samples in `x` Parameters ---------- x: array of shape(n_samples) input data bins: array of shape(nbins+1), optional the bins location nbins: int, optional the number of bins of the resulting histogram normalized: bool, optional if True, the result is returned as a density value Returns ------- h: array of shape (nbins) the histogram bins: array of shape(nbins+1), the bins location """ from scipy.ndimage import gaussian_filter1d # first define the bins if bins is None: h, bins = np.histogram(x, nbins) bins = bins.mean() + 1.2 * (bins - bins.mean()) h, bins = np.histogram(x, bins) # possibly normalize to density h = 1.0 * h dc = bins[1] - bins[0] if normalized: h /= (dc * h.sum()) # define the optimal width sigma = x.std() / (dc * np.exp(.2 * np.log(x.size))) # smooth the histogram h = gaussian_filter1d(h, sigma, mode='constant') return h, bins nipy-0.6.1/nipy/algorithms/statistics/formula/000077500000000000000000000000001470056100100214565ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/formula/__init__.py000066400000000000000000000001661470056100100235720ustar00rootroot00000000000000""" Formula and related objects """ from .formulae import Factor, Formula, Term, make_recarray, natural_spline, terms nipy-0.6.1/nipy/algorithms/statistics/formula/formulae.py000066400000000000000000001310721470056100100236460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Formula objects =============== A formula is basically a sympy expression for the mean of something of the form:: mean = sum([Beta(e)*e for e in expr]) Or, a linear combination of sympy expressions, with each one multiplied by its own "Beta". The elements of expr can be instances of Term (for a linear regression formula, they would all be instances of Term). But, in general, there might be some other parameters (i.e. sympy.Symbol instances) that are not Terms. The design matrix is made up of columns that are the derivatives of mean with respect to everything that is not a Term, evaluated at a recarray that has field names given by [str(t) for t in self.terms]. 
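As a minimal sketch of this workflow (the data values below are made up
purely for illustration), a formula with two regressors and their
interaction can be built and evaluated directly:

>>> x, y = Term('x'), Term('y')
>>> f = Formula([x, y, x*y])
>>> data = make_recarray([(1, 2), (3, 4)], 'xy')
>>> f.design(data, return_float=True).shape
(2, 3)
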
For those familiar with R's formula syntax, if we wanted a design matrix like the following:: > s.table = read.table("http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/supervisor.table", header=T) > d = model.matrix(lm(Y ~ X1*X3, s.table) ) > d (Intercept) X1 X3 X1:X3 1 1 51 39 1989 2 1 64 54 3456 3 1 70 69 4830 4 1 63 47 2961 5 1 78 66 5148 6 1 55 44 2420 7 1 67 56 3752 8 1 75 55 4125 9 1 82 67 5494 10 1 61 47 2867 11 1 53 58 3074 12 1 60 39 2340 13 1 62 42 2604 14 1 83 45 3735 15 1 77 72 5544 16 1 90 72 6480 17 1 85 69 5865 18 1 60 75 4500 19 1 70 57 3990 20 1 58 54 3132 21 1 40 34 1360 22 1 61 62 3782 23 1 66 50 3300 24 1 37 58 2146 25 1 54 48 2592 26 1 77 63 4851 27 1 75 74 5550 28 1 57 45 2565 29 1 85 71 6035 30 1 82 59 4838 attr(,"assign") [1] 0 1 2 3 > With the Formula, it looks like this: >>> r = np.rec.array([ ... (43, 51, 30, 39, 61, 92, 45), (63, 64, 51, 54, 63, 73, 47), ... (71, 70, 68, 69, 76, 86, 48), (61, 63, 45, 47, 54, 84, 35), ... (81, 78, 56, 66, 71, 83, 47), (43, 55, 49, 44, 54, 49, 34), ... (58, 67, 42, 56, 66, 68, 35), (71, 75, 50, 55, 70, 66, 41), ... (72, 82, 72, 67, 71, 83, 31), (67, 61, 45, 47, 62, 80, 41), ... (64, 53, 53, 58, 58, 67, 34), (67, 60, 47, 39, 59, 74, 41), ... (69, 62, 57, 42, 55, 63, 25), (68, 83, 83, 45, 59, 77, 35), ... (77, 77, 54, 72, 79, 77, 46), (81, 90, 50, 72, 60, 54, 36), ... (74, 85, 64, 69, 79, 79, 63), (65, 60, 65, 75, 55, 80, 60), ... (65, 70, 46, 57, 75, 85, 46), (50, 58, 68, 54, 64, 78, 52), ... (50, 40, 33, 34, 43, 64, 33), (64, 61, 52, 62, 66, 80, 41), ... (53, 66, 52, 50, 63, 80, 37), (40, 37, 42, 58, 50, 57, 49), ... (63, 54, 42, 48, 66, 75, 33), (66, 77, 66, 63, 88, 76, 72), ... (78, 75, 58, 74, 80, 78, 49), (48, 57, 44, 45, 51, 83, 38), ... (85, 85, 71, 71, 77, 74, 55), (82, 82, 39, 59, 64, 78, 39)], ... dtype=[('y', '>> x1 = Term('x1'); x3 = Term('x3') >>> f = Formula([x1, x3, x1*x3]) + I >>> f.mean _b0*x1 + _b1*x3 + _b2*x1*x3 + _b3 The I is the "intercept" term, I have explicitly not used R's default of adding it to everything. >>> f.design(r) #doctest: +FIX +FLOAT_CMP array([(51.0, 39.0, 1989.0, 1.0), (64.0, 54.0, 3456.0, 1.0), (70.0, 69.0, 4830.0, 1.0), (63.0, 47.0, 2961.0, 1.0), (78.0, 66.0, 5148.0, 1.0), (55.0, 44.0, 2420.0, 1.0), (67.0, 56.0, 3752.0, 1.0), (75.0, 55.0, 4125.0, 1.0), (82.0, 67.0, 5494.0, 1.0), (61.0, 47.0, 2867.0, 1.0), (53.0, 58.0, 3074.0, 1.0), (60.0, 39.0, 2340.0, 1.0), (62.0, 42.0, 2604.0, 1.0), (83.0, 45.0, 3735.0, 1.0), (77.0, 72.0, 5544.0, 1.0), (90.0, 72.0, 6480.0, 1.0), (85.0, 69.0, 5865.0, 1.0), (60.0, 75.0, 4500.0, 1.0), (70.0, 57.0, 3990.0, 1.0), (58.0, 54.0, 3132.0, 1.0), (40.0, 34.0, 1360.0, 1.0), (61.0, 62.0, 3782.0, 1.0), (66.0, 50.0, 3300.0, 1.0), (37.0, 58.0, 2146.0, 1.0), (54.0, 48.0, 2592.0, 1.0), (77.0, 63.0, 4851.0, 1.0), (75.0, 74.0, 5550.0, 1.0), (57.0, 45.0, 2565.0, 1.0), (85.0, 71.0, 6035.0, 1.0), (82.0, 59.0, 4838.0, 1.0)], dtype=[('x1', '>> t = Term('x') >>> xval = np.array([(3,),(4,),(5,)], np.dtype([('x', np.float64)])) >>> f = t.formula >>> d = f.design(xval) >>> print(d.dtype.descr) [('x', '>> f.design(xval, return_float=True) array([ 3., 4., 5.]) """ # This flag is defined to avoid using isinstance in getterms # and getparams. 
    _term_flag = True

    def _getformula(self):
        return Formula([self])

    formula = property(_getformula,
                       doc="Return a Formula with only terms=[self].")

    def __add__(self, other):
        if self == other:
            return self
        return sympy.Symbol.__add__(self, other)


# time symbol
T = Term('t')


def terms(names, **kwargs):
    r''' Return list of terms with names given by `names`

    This is just a convenience in defining a set of terms, and is the
    equivalent of ``sympy.symbols`` for defining symbols in sympy.

    We enforce the sympy 0.7.0 behavior of returning symbol "abc" from input
    "abc", rather than 3 symbols "a", "b", "c".

    Parameters
    ----------
    names : str or sequence of str
        If a single str, can specify multiple ``Term``s with string
        containing space or ',' as separator.
    \*\*kwargs : keyword arguments
        keyword arguments as for ``sympy.symbols``

    Returns
    -------
    ts : ``Term`` or tuple
        ``Term`` instance or list of ``Term`` instance objects named from
        `names`

    Examples
    --------
    >>> terms(('a', 'b', 'c'))
    (a, b, c)
    >>> terms('a, b, c')
    (a, b, c)
    >>> terms('abc')
    abc
    '''
    if 'each_char' in kwargs:
        raise TypeError('deprecated "each_char" kwarg removed in sympy>0.7.3')
    syms = sympy.symbols(names, **kwargs)
    try:
        len(syms)
    except TypeError:
        return Term(syms.name)
    return tuple(Term(s.name) for s in syms)


class FactorTerm(Term):
    """ Boolean Term derived from a Factor.

    Its properties are the same as a Term except that its product with
    itself is itself.
    """
    # This flag is defined to avoid using isinstance in getterms
    _factor_term_flag = True

    def __new__(cls, name, level):
        # Names or levels can be byte strings
        new = Term.__new__(cls, f"{_to_str(name)}_{_to_str(level)}")
        new.level = level
        new.factor_name = name
        return new

    def __mul__(self, other):
        if self == other:
            return self
        else:
            return sympy.Symbol.__mul__(self, other)


class Beta(sympy.Dummy):
    ''' A symbol tied to a Term `term` '''

    def __new__(cls, name, term):
        new = sympy.Dummy.__new__(cls, name)
        new._term = term
        return new


def getparams(expression):
    """ Return the parameters of an expression that are not Term
    instances but are instances of sympy.Symbol.

    Examples
    --------
    >>> x, y, z = [Term(l) for l in 'xyz']
    >>> f = Formula([x,y,z])
    >>> getparams(f)
    []
    >>> f.mean
    _b0*x + _b1*y + _b2*z
    >>> getparams(f.mean)
    [_b0, _b1, _b2]
    >>> th = sympy.Symbol('theta')
    >>> f.mean*sympy.exp(th)
    (_b0*x + _b1*y + _b2*z)*exp(theta)
    >>> getparams(f.mean*sympy.exp(th))
    [_b0, _b1, _b2, theta]
    """
    atoms = set()
    expression = np.array(expression)
    if expression.shape == ():
        expression = expression.reshape((1,))
    if expression.ndim > 1:
        expression = expression.reshape((np.prod(expression.shape),))
    for term in expression:
        atoms = atoms.union(sympy.sympify(term).atoms())
    params = sorted((atom for atom in atoms
                     if isinstance(atom, sympy.Symbol) and not is_term(atom)),
                    key=default_sort_key)
    return params


def getterms(expression):
    """ Return all instances of Term in an expression.
Examples -------- >>> x, y, z = [Term(l) for l in 'xyz'] >>> f = Formula([x,y,z]) >>> getterms(f) [x, y, z] >>> getterms(f.mean) [x, y, z] """ atoms = set() expression = np.array(expression) if expression.shape == (): expression = expression.reshape((1,)) if expression.ndim > 1: expression = expression.reshape((np.prod(expression.shape),)) for e in expression: atoms = atoms.union(e.atoms()) terms = sorted((atom for atom in atoms if is_term(atom)), key=default_sort_key) return terms def _recarray_from_array(arr, names, drop_name_dim=_NoValue): """ Create recarray from input array `arr`, field names `names` """ if not arr.dtype.isbuiltin: # Structured array as input # Rename fields dtype = np.dtype([(n, d[1]) for n, d in zip(names, arr.dtype.descr)]) return arr.view(dtype) # Can drop name axis for > 1D arrays or row vectors (scalar per name). can_name_drop = arr.ndim > 1 or len(names) > 1 if can_name_drop and drop_name_dim is _NoValue: warnings.warn( 'Default behavior of make_recarray and > 1D arrays will ' 'change in next Nipy release. Current default returns\n' 'array with same number of dimensions as input, with ' 'axis corresponding to the field names having length 1\n; ' 'Future default will be to drop this length 1 axis. Please ' 'change your code to use explicit True or False for\n' 'compatibility with future Nipy.', VisibleDeprecationWarning, stacklevel=2) # This default will change to True in next version of Nipy drop_name_dim = False dtype = np.dtype([(n, arr.dtype) for n in names]) # At least for numpy <= 1.7.1, the dimension that numpy applies the names # to depends on the memory layout (C or F). Ensure C layout for consistent # application of names to last dimension. rec_arr = np.ascontiguousarray(arr).view(dtype) if can_name_drop and drop_name_dim: rec_arr.shape = arr.shape[:-1] return rec_arr def make_recarray(rows, names, dtypes=None, drop_name_dim=_NoValue): """ Create recarray from `rows` with field names `names` Create a recarray with named columns from a list or ndarray of `rows` and sequence of `names` for the columns. If `rows` is an ndarray, `dtypes` must be None, otherwise we raise a ValueError. Otherwise, if `dtypes` is None, we cast the data in all columns in `rows` as np.float64. If `dtypes` is not None, the routine uses `dtypes` as a dtype specifier for the output structured array. Parameters ---------- rows: list or array Rows that will be turned into an recarray. names: sequence Sequence of strings - names for the columns. dtypes: None or sequence of str or sequence of np.dtype, optional Used to create a np.dtype, can be sequence of np.dtype or string. drop_name_dim : {_NoValue, False, True}, optional Flag for compatibility with future default behavior. Current default is False. If True, drops the length 1 dimension corresponding to the axis transformed into fields when converting into a recarray. If _NoValue specified, gives default. Default will change to True in the next version of Nipy. Returns ------- v : np.ndarray Structured array with field names given by `names`. Examples -------- The following tests depend on machine byte order for their exact output. >>> arr = np.array([[3, 4], [4, 6], [6, 8]]) >>> make_recarray(arr, ['x', 'y'], ... drop_name_dim=True) #doctest: +FIX array([(3, 4), (4, 6), (6, 8)], dtype=[('x', '>> make_recarray(arr, ['x', 'y'], ... drop_name_dim=False) #doctest: +FIX array([[(3, 4)], [(4, 6)], [(6, 8)]], dtype=[('x', '>> r = make_recarray(arr, ['w', 'u'], drop_name_dim=True) >>> make_recarray(r, ['x', 'y'], ... 
drop_name_dim=True) #doctest: +FIX array([(3, 4), (4, 6), (6, 8)], dtype=[('x', '>> make_recarray([[3, 4], [4, 6], [7, 9]], 'wv', ... [np.float64, np.int_]) #doctest: +FIX +FLOAT_CMP array([(3.0, 4), (4.0, 6), (7.0, 9)], dtype=[('w', '>> s, t = [Term(l) for l in 'st'] >>> f, g = [sympy.Function(l) for l in 'fg'] >>> form = Formula([f(t),g(s)]) >>> newform = form.subs(g, sympy.Function('h')) >>> newform.terms array([f(t), h(s)], dtype=object) >>> form.terms array([f(t), g(s)], dtype=object) """ return self.__class__([term.subs(old, new) for term in self.terms]) def __add__(self, other): """ New Formula combining terms of `self` with those of `other`. Parameters ---------- other : Formula instance Object for which ``is_formula(other)`` is True Returns ------- added : Formula instance Formula combining terms of `self` with terms of `other` Examples -------- >>> x, y, z = [Term(l) for l in 'xyz'] >>> f1 = Formula([x,y,z]) >>> f2 = Formula([y])+I >>> f3=f1+f2 >>> sorted(f1.terms, key=default_sort_key) [x, y, z] >>> sorted(f2.terms, key=default_sort_key) [1, y] >>> sorted(f3.terms, key=default_sort_key) [1, x, y, y, z] """ if not is_formula(other): raise ValueError('only Formula objects can be added to a Formula') f = Formula(np.hstack([self.terms, other.terms])) return f def __sub__(self, other): """ New Formula by deleting terms in `other` from those in `self` Create and return a new Formula by deleting terms in `other` from those in `self`. No exceptions are raised for terms in `other` that do not appear in `self`. Parameters ---------- other : Formula instance Object for which ``is_formula(other)`` is True Returns ------- subbed : Formula instance Formula with terms of `other` removed from terms of `self` Examples -------- >>> x, y, z = [Term(l) for l in 'xyz'] >>> f1 = Formula([x, y, z]) >>> f2 = Formula([y]) + I >>> f1.mean _b0*x + _b1*y + _b2*z >>> f2.mean _b0*y + _b1 >>> f3 = f2 - f1 >>> f3.mean _b0 >>> f4 = f1 - f2 >>> f4.mean _b0*x + _b1*z """ if not is_formula(other): raise ValueError( 'only Formula objects can be subtracted from a Formula') # Preserve order of terms in subtraction unwanted = set(other.terms) d = [term for term in self.terms if term not in unwanted] return Formula(d) def __array__(self): return self.terms def _getparams(self): return getparams(self.mean) params = property(_getparams, doc='The parameters in the Formula.') def __mul__(self, other): if not is_formula(other): raise ValueError('only two Formulas can be multiplied together') if is_factor(self): if self == other: return self v = [] # Compute the pairwise product of each term # If either one is a Term, use Term's multiplication for sterm in self.terms: for oterm in other.terms: if is_term(sterm): v.append(Term.__mul__(sterm, oterm)) elif is_term(oterm): v.append(Term.__mul__(oterm, sterm)) else: v.append(sterm*oterm) terms = sorted(set(v), key=default_sort_key) return Formula(tuple(terms)) def __eq__(self, other): s = np.array(self) o = np.array(other) if s.shape != o.shape: return False return np.all(np.equal(np.array(self), np.array(other))) def _setup_design(self): """ Initialize design Create a callable object to evaluate the design matrix at a given set of parameter values to be specified by a recarray and observed Term values, also specified by a recarray. """ # the design expression is the differentiation of the expression # for the mean. It is a list d = self.design_expr # Before evaluating, we recreate the formula # with numbered terms, and numbered parameters. 
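        # As a sketch of the renaming (the actual numbers include the random
        # offset chosen below): for a mean of _b0*x + _b1*y, the terms x, y
        # are mapped to symbols like __t0__, __t1__ and the parameters
        # _b0, _b1 to dummies like __p0__, __p1__; only the positional order
        # of the arguments matters to the resulting lambda.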
# This renaming has no impact on the # final design matrix as the # callable, self._f below, is a lambda # that does not care about the names of the terms. # First, find all terms in the mean expression, # and rename them in the form "__t%d__" with a # random offset. # This may cause a possible problem # when there are parameters named something like "__t%d__". # Using the random offset will minimize the possibility # of this happening. # This renaming is here principally because of the intercept. random_offset = np.random.randint(low=0, high=2**30) terms = getterms(self.mean) newterms = [] for i, t in enumerate(terms): newt = sympy.Symbol("__t%d__" % (i + random_offset)) for j, _ in enumerate(d): d[j] = d[j].subs(t, newt) newterms.append(newt) # Next, find all parameters that remain in the design expression. # In a standard regression model, there will be no parameters # because they will all be differentiated away in computing # self.design_expr. In nonlinear models, parameters will remain. params = getparams(self.design_expr) newparams = [] for i, p in enumerate(params): newp = Dummy("__p%d__" % (i + random_offset)) for j, _ in enumerate(d): d[j] = d[j].subs(p, newp) newparams.append(newp) # If there are any aliased functions, these need to be added # to the name space before sympy lambdifies the expression # These "aliased" functions are used for things like # the natural splines, etc. You can represent natural splines # with sympy but the expression is pretty awful. Note that # ``d`` here is list giving the differentiation of the # expression for the mean. self._f(...) therefore also returns # a list self._f = lambdify(newparams + newterms, d, ("numpy")) # The input to self.design will be a recarray of that must # have field names that the Formula will expect to see. # However, if any of self.terms are FactorTerms, then the field # in the recarray will not actually be in the Term. # # For example, if there is a Factor 'f' with levels ['a','b'], # there will be terms 'f_a' and 'f_b', though the input to # design will have a field named 'f'. In this sense, # the recarray used in the call to self.design # is not really made up of terms, but "preterms". # In this case, the callable preterm = [] for t in terms: if not is_factor_term(t): preterm.append(str(t)) else: preterm.append(t.factor_name) preterm = list(set(preterm)) # There is also an argument for parameters that are not # Terms. self._dtypes = {'param':np.dtype([(str(p), np.float64) for p in params]), 'term':np.dtype([(str(t), np.float64) for t in terms]), 'preterm':np.dtype([(n, np.float64) for n in preterm])} self.__terms = terms def design(self, input, param=None, return_float=False, contrasts=None): """ Construct the design matrix, and optional contrast matrices. Parameters ---------- input : np.recarray Recarray including fields needed to compute the Terms in getparams(self.design_expr). param : None or np.recarray Recarray including fields that are not Terms in getparams(self.design_expr) return_float : bool, optional If True, return a np.float64 array rather than a np.recarray contrasts : None or dict, optional Contrasts. The items in this dictionary should be (str, Formula) pairs where a contrast matrix is constructed for each Formula by evaluating its design at the same parameters as self.design. If not None, then the return_float is set to True. Returns ------- des : 2D array design matrix cmatrices : dict, optional Dictionary with keys from `contrasts` input, and contrast matrices corresponding to `des` design matrix. 
Returned only if `contrasts` input is not None """ self._setup_design() preterm_recarray = input param_recarray = param # The input to design should have field names for all fields in self._dtypes['preterm'] if not set(preterm_recarray.dtype.names).issuperset(self._dtypes['preterm'].names): raise ValueError("for term, expecting a recarray with " "dtype having the following names: {!r}".format(self._dtypes['preterm'].names)) # The parameters should have field names for all fields in self._dtypes['param'] if param_recarray is not None: if not set(param_recarray.dtype.names).issuperset(self._dtypes['param'].names): raise ValueError("for param, expecting a recarray with " "dtype having the following names: {!r}".format(self._dtypes['param'].names)) # If the only term is an intercept, # the return value is a matrix of 1's. if list(self.terms) == [sympy.Number(1)]: a = np.ones(preterm_recarray.shape[0], np.float64) if not return_float: a = a.view(np.dtype([('intercept', np.float64)])) return a elif not self._dtypes['term']: raise ValueError("none of the expressions in self.terms " "are Term instances; shape of resulting " "undefined") # The term_recarray is essentially the same as preterm_recarray, # except that all factors in self are expanded # into their respective binary columns. term_recarray = np.zeros(preterm_recarray.shape[0], dtype=self._dtypes['term']) for t in self.__terms: if not is_factor_term(t): term_recarray[t.name] = preterm_recarray[t.name] else: factor_col = preterm_recarray[t.factor_name] # Python 3: If column type is bytes, convert to string, to allow # level comparison if factor_col.dtype.kind == 'S': factor_col = factor_col.astype('U') fl_ind = np.array([x == t.level for x in factor_col]).reshape(-1) term_recarray[f'{t.factor_name}_{t.level}'] = fl_ind # The lambda created in self._setup_design needs to take a tuple of # columns as argument, not an ndarray, so each column # is extracted and put into float_tuple. float_array = term_recarray.view(np.float64) float_array.shape = (term_recarray.shape[0], -1) float_array = float_array.T float_tuple = tuple(float_array) # If there are any parameters, they also must be extracted # and put into a tuple with the order specified # by self._dtypes['param'] if param_recarray is not None: param = tuple(float(param_recarray[n]) for n in self._dtypes['param'].names) else: param = () # Evaluate the design at the parameters and tuple of arrays D = self._f(*(param+float_tuple)) # TODO: check if this next stepis necessary # I think it is because the lambda evaluates sympy.Number(1) to 1 # and not an array. D_tuple = [np.asarray(w) for w in D] need_to_modify_shape = [] OK_row_shapes = [] for i, row in enumerate(D_tuple): if row.shape in [(),(1,)]: need_to_modify_shape.append(i) else: OK_row_shapes.append(row.shape[0]) # Make sure that each array has the correct shape. # The columns in need_to_modify should just be # the intercept column, which evaluates to have shape == (). # This makes sure that it has the correct number of rows. for i in need_to_modify_shape: D_tuple[i].shape = () D_tuple[i] = np.multiply.outer(D_tuple[i], np.ones(preterm_recarray.shape[0])) # At this point, all the columns have the correct shape and the # design matrix is almost ready to output. D = np.array(D_tuple).T # If we will return a float matrix or any contrasts, # we may have some reshaping to do. if contrasts is None: contrasts = {} if return_float or contrasts: # If the design matrix is just a column of 1s # return a 1-dimensional array. 
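            # (np.squeeze drops the length-1 column axis in that case; the
            # contrast branch below restores a 2D view with reshape before
            # taking the pseudo-inverse.)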
D = np.squeeze(D.astype(np.float64)) # If there are contrasts, the pseudo-inverse of D # must be computed. if contrasts: if D.ndim == 1: _D = D.reshape((D.shape[0], 1)) else: _D = D pinvD = np.linalg.pinv(_D) else: # Correct the dtype. # XXX There seems to be a lot of messing around with the dtype. # This would be a convenient place to just add # labels like a DataArray. D = np.array([tuple(r) for r in D], self.dtype) # Compute the contrast matrices, if any. if contrasts: cmatrices = {} for key, cf in contrasts.items(): if not is_formula(cf): cf = Formula([cf]) L = cf.design(input, param=param_recarray, return_float=True) cmatrices[key] = contrast_from_cols_or_rows(L, _D, pseudo=pinvD) return D, cmatrices else: return D def natural_spline(t, knots=None, order=3, intercept=False): """ Return a Formula containing a natural spline Spline for a Term with specified `knots` and `order`. Parameters ---------- t : ``Term`` knots : None or sequence, optional Sequence of float. Default None (same as empty list) order : int, optional Order of the spline. Defaults to a cubic (==3) intercept : bool, optional If True, include a constant function in the natural spline. Default is False Returns ------- formula : Formula A Formula with (len(knots) + order) Terms (if intercept=False, otherwise includes one more Term), made up of the natural spline functions. Examples -------- >>> x = Term('x') >>> n = natural_spline(x, knots=[1,3,4], order=3) >>> xval = np.array([3,5,7.]).view(np.dtype([('x', np.float64)])) >>> n.design(xval, return_float=True) array([[ 3., 9., 27., 8., 0., -0.], [ 5., 25., 125., 64., 8., 1.], [ 7., 49., 343., 216., 64., 27.]]) >>> d = n.design(xval) >>> print(d.dtype.descr) [('ns_1(x)', '>> f = Factor('a', ['x','y']) >>> sf = f.stratify('theta') >>> sf.mean _theta0*a_x + _theta1*a_y """ if not set(str(variable)).issubset(ascii_letters + digits): raise ValueError('variable should be interpretable as a ' 'name and not have anything but digits ' 'and numbers') variable = sympy.sympify(variable) f = Formula(self._terms, char=variable) f.name = self.name return f @staticmethod def fromcol(col, name): """ Create a Factor from a column array. Parameters ---------- col : ndarray an array with ndim==1 name : str name of the Factor Returns ------- factor : Factor Examples -------- >>> data = np.array([(3,'a'),(4,'a'),(5,'b'),(3,'b')], np.dtype([('x', np.float64), ('y', 'S1')])) >>> f1 = Factor.fromcol(data['y'], 'y') >>> f2 = Factor.fromcol(data['x'], 'x') >>> d = f1.design(data) >>> print(d.dtype.descr) [('y_a', '>> d = f2.design(data) >>> print(d.dtype.descr) [('x_3', ' 1): raise ValueError('expecting an array that can be thought ' 'of as a column or field of a recarray') levels = np.unique(col) if not col.dtype.names and not name: name = 'factor' elif col.dtype.names: name = col.dtype.names[0] return Factor(name, levels) def contrast_from_cols_or_rows(L, D, pseudo=None): """ Construct a contrast matrix from a design matrix D (possibly with its pseudo inverse already computed) and a matrix L that either specifies something in the column space of D or the row space of D. Parameters ---------- L : ndarray Matrix used to try and construct a contrast. D : ndarray Design matrix used to create the contrast. pseudo : None or array-like, optional If not None, gives pseudo-inverse of `D`. Allows you to pass this if it is already calculated. Returns ------- C : ndarray Matrix with C.shape[1] == D.shape[1] representing an estimable contrast. 
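    Examples
    --------
    A minimal sketch with a made-up design: passing a column of `D` as `L`
    (so that ``L.shape[0] == n``) recovers the contrast selecting that
    column's coefficient:

    >>> D = np.array([[1., 0], [1, 0], [0, 1]])
    >>> contrast_from_cols_or_rows(D[:, :1], D)  #doctest: +FLOAT_CMP
    array([1., 0.])
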
Notes ----- From an n x p design matrix D and a matrix L, tries to determine a p x q contrast matrix C which determines a contrast of full rank, i.e. the n x q matrix dot(transpose(C), pinv(D)) is full rank. L must satisfy either L.shape[0] == n or L.shape[1] == p. If L.shape[0] == n, then L is thought of as representing columns in the column space of D. If L.shape[1] == p, then L is thought of as what is known as a contrast matrix. In this case, this function returns an estimable contrast corresponding to the dot(D, L.T) This always produces a meaningful contrast, not always with the intended properties because q is always non-zero unless L is identically 0. That is, it produces a contrast that spans the column space of L (after projection onto the column space of D). """ L = np.asarray(L) D = np.asarray(D) n, p = D.shape if L.shape[0] != n and L.shape[1] != p: raise ValueError('shape of L and D mismatched') if pseudo is None: pseudo = pinv(D) if L.shape[0] == n: C = np.dot(pseudo, L).T else: C = np.dot(pseudo, np.dot(D, L.T)).T Lp = np.dot(D, C.T) if len(Lp.shape) == 1: Lp.shape = (n, 1) Lp_rank = matrix_rank(Lp) if Lp_rank != Lp.shape[1]: Lp = full_rank(Lp, Lp_rank) C = np.dot(pseudo, Lp).T return np.squeeze(C) class RandomEffects(Formula): """ Covariance matrices for common random effects analyses. Examples -------- Two subjects (here named 2 and 3): >>> subj = make_recarray([2,2,2,3,3], 's') >>> subj_factor = Factor('s', [2,3]) By default the covariance matrix is symbolic. The display differs a little between sympy versions (hence we don't check it in the doctests): >>> c = RandomEffects(subj_factor.terms) >>> c.cov(subj) #doctest: +IGNORE_OUTPUT array([[_s2_0, _s2_0, _s2_0, 0, 0], [_s2_0, _s2_0, _s2_0, 0, 0], [_s2_0, _s2_0, _s2_0, 0, 0], [0, 0, 0, _s2_1, _s2_1], [0, 0, 0, _s2_1, _s2_1]], dtype=object) With a numeric `sigma`, you get a numeric array: >>> c = RandomEffects(subj_factor.terms, sigma=np.array([[4,1],[1,6]])) >>> c.cov(subj) array([[ 4., 4., 4., 1., 1.], [ 4., 4., 4., 1., 1.], [ 4., 4., 4., 1., 1.], [ 1., 1., 1., 6., 6.], [ 1., 1., 1., 6., 6.]]) """ def __init__(self, seq, sigma=None, char = 'e'): """ Initialize random effects instance Parameters ---------- seq : [``sympy.Basic``] sigma : ndarray Covariance of the random effects. Defaults to a diagonal with entries for each random effect. char : character for regression coefficient """ self._terms = np.asarray(seq) q = self._terms.shape[0] self._counter = 0 if sigma is None: self.sigma = np.diag([Dummy('s2_%d' % i) for i in range(q)]) else: self.sigma = sigma if self.sigma.shape != (q,q): raise ValueError('incorrect shape for covariance ' 'of random effects, ' f'should have shape {q!r}') self.char = char def cov(self, term, param=None): """ Compute the covariance matrix for some given data. Parameters ---------- term : np.recarray Recarray including fields corresponding to the Terms in getparams(self.design_expr). param : np.recarray Recarray including fields that are not Terms in getparams(self.design_expr) Returns ------- C : ndarray Covariance matrix implied by design and self.sigma. """ D = self.design(term, param=param, return_float=True) return np.dot(D, np.dot(self.sigma, D.T)) def is_term(obj): """ Is obj a Term? """ return hasattr(obj, "_term_flag") def is_factor_term(obj): """ Is obj a FactorTerm? """ return hasattr(obj, "_factor_term_flag") def is_formula(obj): """ Is obj a Formula? """ return hasattr(obj, "_formula_flag") def is_factor(obj): """ Is obj a Factor? 
""" return hasattr(obj, "_factor_flag") nipy-0.6.1/nipy/algorithms/statistics/formula/tests/000077500000000000000000000000001470056100100226205ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/formula/tests/__init__.py000066400000000000000000000000271470056100100247300ustar00rootroot00000000000000# Make tests a package nipy-0.6.1/nipy/algorithms/statistics/formula/tests/test_formula.py000066400000000000000000000430431470056100100257020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for formulae """ from warnings import catch_warnings, simplefilter import numpy as np import pytest import sympy from numpy.testing import assert_almost_equal, assert_array_equal from sympy.utilities.lambdify import implemented_function from nipy.utils import VisibleDeprecationWarning from .. import formulae as F from ..formulae import Term, terms def test_terms(): t = terms('a') assert isinstance(t, Term) a, b, c = Term('a'), Term('b'), Term('c') assert t == a ts = terms(('a', 'b', 'c')) assert ts == (a, b, c) # a string without separator chars returns one symbol. This is the # sympy 0.7 behavior assert terms('abc') == Term('abc') # separators return multiple symbols assert terms('a b c') == (a, b, c) assert terms('a, b, c') == (a, b, c) # no arg is an error pytest.raises(TypeError, terms) # but empty arg returns empty tuple assert terms(()) == () # Test behavior of deprecated each_char kwarg pytest.raises(TypeError, terms, 'abc', each_char=True) def test_getparams_terms(): t = F.Term('t') x, y, z = (sympy.Symbol(l) for l in 'xyz') assert set(F.getparams(x*y*t)) == {x,y} assert set(F.getterms(x*y*t)) == {t} matrix_expr = np.array([[x,y*t],[y,z]]) assert set(F.getparams(matrix_expr)) == {x,y,z} assert set(F.getterms(matrix_expr)) == {t} def test_formula_params(): t = F.Term('t') x, y = (sympy.Symbol(l) for l in 'xy') f = F.Formula([t*x,y]) assert set(f.params) == set([x,y] + list(f.coefs.values())) def test_contrast1(): x = F.Term('x') assert x == x+x y = F.Term('y') z = F.Term('z') f = F.Formula([x,y]) arr = F.make_recarray([[3,5,4],[8,21,-1],[4,6,-2]], 'xyz') D, C = f.design(arr, contrasts={'x':x.formula, 'diff':F.Formula([x-y]), 'sum':F.Formula([x+y]), 'both':F.Formula([x-y,x+y])}) assert_almost_equal(C['x'], np.array([1,0])) assert_almost_equal(C['diff'], np.array([1,-1])) assert_almost_equal(C['sum'], np.array([1,1])) assert_almost_equal(C['both'], np.array([[1,-1],[1,1]])) f = F.Formula([x,y,z]) arr = F.make_recarray([[3,5,4],[8,21,-1],[4,6,-2]], 'xyz') D, C = f.design(arr, contrasts={'x':x.formula, 'diff':F.Formula([x-y]), 'sum':F.Formula([x+y]), 'both':F.Formula([x-y,x+y])}) assert_almost_equal(C['x'], np.array([1,0,0])) assert_almost_equal(C['diff'], np.array([1,-1,0])) assert_almost_equal(C['sum'], np.array([1,1,0])) assert_almost_equal(C['both'], np.array([[1,-1,0],[1,1,0]])) def test_formula_from_recarray(): D = np.rec.array([ (43, 51, 30, 39, 61, 92, 'blue'), (63, 64, 51, 54, 63, 73, 'blue'), (71, 70, 68, 69, 76, 86, 'red'), (61, 63, 45, 47, 54, 84, 'red'), (81, 78, 56, 66, 71, 83, 'blue'), (43, 55, 49, 44, 54, 49, 'blue'), (58, 67, 42, 56, 66, 68, 'green'), (71, 75, 50, 55, 70, 66, 'green'), (72, 82, 72, 67, 71, 83, 'blue'), (67, 61, 45, 47, 62, 80, 'red'), (64, 53, 53, 58, 58, 67, 'blue'), (67, 60, 47, 39, 59, 74, 'green'), (69, 62, 57, 42, 55, 63, 'blue'), (68, 83, 83, 45, 59, 77, 'red'), (77, 77, 54, 72, 79, 77, 'red'), (81, 90, 50, 72, 60, 54, 'blue'), (74, 85, 64, 
69, 79, 79, 'green'), (65, 60, 65, 75, 55, 80, 'green'), (65, 70, 46, 57, 75, 85, 'red'), (50, 58, 68, 54, 64, 78, 'red'), (50, 40, 33, 34, 43, 64, 'blue'), (64, 61, 52, 62, 66, 80, 'blue'), (53, 66, 52, 50, 63, 80, 'red'), (40, 37, 42, 58, 50, 57, 'red'), (63, 54, 42, 48, 66, 75, 'blue'), (66, 77, 66, 63, 88, 76, 'blue'), (78, 75, 58, 74, 80, 78, 'red'), (48, 57, 44, 45, 51, 83, 'blue'), (85, 85, 71, 71, 77, 74, 'red'), (82, 82, 39, 59, 64, 78, 'blue')], dtype=[('y', 'i8'), ('x1', 'i8'), ('x2', 'i8'), ('x3', 'i8'), ('x4', 'i8'), ('x5', 'i8'), ('x6', '|S5')]) f = F.Formula.fromrec(D, drop='y') assert ({str(t) for t in f.terms} == {'x1', 'x2', 'x3', 'x4', 'x5', 'x6_green', 'x6_blue', 'x6_red'}) assert ({str(t) for t in f.design_expr} == {'x1', 'x2', 'x3', 'x4', 'x5', 'x6_green', 'x6_blue', 'x6_red'}) def test_random_effects(): subj = F.make_recarray([2,2,2,3,3], 's') subj_factor = F.Factor('s', [2,3]) c = F.RandomEffects(subj_factor.terms, sigma=np.array([[4,1],[1,6]])) C = c.cov(subj) assert_almost_equal(C, [[4,4,4,1,1], [4,4,4,1,1], [4,4,4,1,1], [1,1,1,6,6], [1,1,1,6,6]]) # Sympy 0.7.0 does not cancel 1.0 * A to A; however, the dot product in the # covariance calculation returns floats, which are them multiplied by the # terms to give term * 1.0, etc. We just insert the annoying floating point # here for the test, relying on sympy to do the same thing here as in the # dot product a = sympy.Symbol('a') * 1.0 b = sympy.Symbol('b') * 1.0 c = F.RandomEffects(subj_factor.terms, sigma=np.array([[a,0],[0,b]])) C = c.cov(subj) t = np.equal(C, [[a,a,a,0,0], [a,a,a,0,0], [a,a,a,0,0], [0,0,0,b,b], [0,0,0,b,b]]) assert np.all(t) def test_design_expression(): t1 = F.Term("x") t2 = F.Term('y') f = t1.formula + t2.formula assert str(f.design_expr) in ['[x, y]', '[y, x]'] def test_formula_property(): # Check that you can create a Formula with one term t1 = F.Term("x") f = t1.formula assert f.design_expr == [t1] def test_mul(): f = F.Factor('t', [2,3]) f2 = F.Factor('t', [2,3,4]) t2 = f['t_2'] x = F.Term('x') assert t2 == t2*t2 assert f == f*f assert f != f2 assert set((t2*x).atoms()) == {t2,x} def test_factor_add_sub(): # Test adding and subtracting Factors f1 = F.Factor('t', [2, 3, 4]) f2 = F.Factor('t', [2, 3]) # Terms do not cancel in addition assert f1 + f2 == F.Formula(np.hstack((f1.terms, f2.terms))) assert f1 - f2 == F.Factor('t', [4]) f3 = F.Factor('p', [0, 1]) assert f1 + f3 == F.Formula(np.hstack((f1.terms, f3.terms))) assert f1 - f3 == f1 def test_term_order_sub(): # Test preservation of term order in subtraction f1 = F.Formula(terms('z, y, x, w')) f2 = F.Formula(terms('x, y, a')) assert_array_equal((f1 - f2).terms, terms('z, w')) assert_array_equal((f2 - f1).terms, terms('a')) def assert_starr_equal(a, b): assert a.shape == b.shape assert a.dtype.names == b.dtype.names for name in a.dtype.names: assert_array_equal(a[name], b[name]) assert a[name].dtype == b[name].dtype def test_make_recarray(): # Test make_array # From list / sequence # 2D case fromrecords = np.rec.fromrecords data_2d = [(3, 4), (4, 6), (7, 9)] m = F.make_recarray(data_2d, 'wv', [np.float64, np.int_]) assert_starr_equal(m, fromrecords( data_2d, dtype=[('w', float), ('v', int)])) # 1D vector, sequence and array for data_1d in (range(4), np.arange(4).astype(float)): # Column vector. For array case, drop name dim for shape match assert_starr_equal( F.make_recarray(data_1d, ['f1'], drop_name_dim=True), np.arange(4).astype([('f1', float)])) # Row vector. 
Drop name dim for shape match assert_starr_equal( F.make_recarray(data_1d, 'abcd', drop_name_dim=True), np.array(tuple(range(4)), dtype=[(c, float) for c in 'abcd'])) # From another recarray, reaming fields m2 = F.make_recarray(m, 'xy') assert_starr_equal(m2, fromrecords( data_2d, dtype=[('x', float), ('y', int)])) # Recarrays don't change shape, trailing dimensions or no assert_starr_equal(F.make_recarray(m2, 'xy'), m2) m2_dash = np.reshape(m2, (3, 1, 1, 1)) assert_starr_equal(F.make_recarray(m2_dash, 'xy'), m2_dash) # From an array, drop dim case arr = np.array(data_2d) assert arr.shape == (3, 2) assert_starr_equal( F.make_recarray(arr, 'xy', drop_name_dim=True), fromrecords(data_2d, dtype=[('x', int), ('y', int)])) assert_starr_equal( F.make_recarray(arr.astype(float), 'xy', drop_name_dim=True), fromrecords(data_2d, dtype=[('x', float), ('y', float)])) assert_starr_equal( F.make_recarray(arr.reshape((3, 1, 2)), 'xy', drop_name_dim=True), fromrecords(data_2d, dtype=[('x', int), ('y', int)]). reshape((3, 1))) # Not drop dim case, trailing length 1 axis. assert_starr_equal( F.make_recarray(arr, 'xy', drop_name_dim=False), fromrecords(data_2d, dtype=[('x', int), ('y', int)]). reshape((3, 1))) assert_starr_equal( F.make_recarray(arr.reshape((3, 1, 2)), 'xy', drop_name_dim=False), fromrecords(data_2d, dtype=[('x', int), ('y', int)]). reshape((3, 1, 1))) # False case is the default, with warning (for now) with catch_warnings(record=True) as warn_list: # Clear any pre-existing warnings cached in formula module, to make # sure warning is triggered.. See # nibabel.testing.clear_and_catch_warnings for detail. if hasattr(F, '__warningregistry__'): F.__warningregistry__.clear() simplefilter('always') assert_starr_equal( F.make_recarray(arr, 'xy'), fromrecords(data_2d, dtype=[('x', int), ('y', int)]). 
reshape((3, 1))) assert warn_list[0].category == VisibleDeprecationWarning # Can't pass dtypes to array version of function pytest.raises(ValueError, F.make_recarray, arr, 'xy', [int, float]) def test_make_recarray_axes(): # On earlier numpy, axis to which names applied depends on memory layout # C contiguous arr = np.arange(9).reshape((3,3)) s_arr = F.make_recarray(arr, 'abc', drop_name_dim=True) assert_array_equal(s_arr['a'], arr[:, 0]) # Fortran contiguous s_arr = F.make_recarray(arr.T, 'abc', drop_name_dim=True) assert_array_equal(s_arr['a'], arr[0, :]) def test_str_formula(): t1 = F.Term('x') t2 = F.Term('y') f = F.Formula([t1, t2]) assert str(f) == "Formula([x, y])" def test_design(): # Check that you get the design matrix we expect t1 = F.Term("x") t2 = F.Term('y') n = F.make_recarray([2,4,5], 'x') assert_almost_equal(t1.formula.design(n)['x'], n['x']) f = t1.formula + t2.formula n = F.make_recarray([(2,3),(4,5),(5,6)], 'xy') assert_almost_equal(f.design(n)['x'], n['x']) assert_almost_equal(f.design(n)['y'], n['y']) f = t1.formula + t2.formula + F.I + t1.formula * t2.formula assert_almost_equal(f.design(n)['x'], n['x']) assert_almost_equal(f.design(n)['y'], n['y']) assert_almost_equal(f.design(n)['1'], 1) assert_almost_equal(f.design(n)['x*y'], n['x']*n['y']) # drop x field, check that design raises error ny = np.recarray(n.shape, dtype=[('x', n.dtype['x'])]) ny['x'] = n['x'] pytest.raises(ValueError, f.design, ny) n = np.array([(2,3,'a'),(4,5,'b'),(5,6,'a')], np.dtype([('x', np.float64), ('y', np.float64), ('f', 'S1')])) f = F.Factor('f', ['a','b']) ff = t1.formula * f + F.I assert_almost_equal(ff.design(n)['f_a*x'], n['x']*[1,0,1]) assert_almost_equal(ff.design(n)['f_b*x'], n['x']*[0,1,0]) assert_almost_equal(ff.design(n)['1'], 1) def test_design_inputs(): # Check we can send in fields of type 'S', 'U', 'O' for design regf = F.Formula(F.terms('x, y')) f = F.Factor('f', ['a', 'b']) ff = regf + f for field_type in ('S1', 'U1', 'O'): data = np.array([(2, 3, 'a'), (4, 5, 'b'), (5, 6, 'a')], dtype = [('x', np.float64), ('y', np.float64), ('f', field_type)]) assert_array_equal(ff.design(data, return_float=True), [[2, 3, 1, 0], [4, 5, 0, 1], [5, 6, 1, 0]]) def test_formula_inputs(): # Check we can send in fields of type 'S', 'U', 'O' for factor levels level_names = ['red', 'green', 'blue'] for field_type in ('S', 'U', 'O'): levels = np.array(level_names, dtype=field_type) f = F.Factor('myname', levels) assert f.levels == level_names # Sending in byte objects levels = [L.encode() for L in level_names] f = F.Factor('myname', levels) assert f.levels == level_names def test_alias(): x = F.Term('x') f = implemented_function('f', lambda x: 2*x) g = implemented_function('g', lambda x: np.sqrt(x)) ff = F.Formula([f(x), g(x)**2]) n = F.make_recarray([2,4,5], 'x') assert_almost_equal(ff.design(n)['f(x)'], n['x']*2) assert_almost_equal(ff.design(n)['g(x)**2'], n['x']) def test_factor_getterm(): fac = F.Factor('f', 'ab') assert fac['f_a'] == fac.get_term('a') fac = F.Factor('f', [1,2]) assert fac['f_1'] == fac.get_term(1) fac = F.Factor('f', [1,2]) pytest.raises(ValueError, fac.get_term, '1') m = fac.main_effect assert set(m.terms) == {fac['f_1']-fac['f_2']} def test_stratify(): fac = F.Factor('x', [2,3]) y = sympy.Symbol('y') f = sympy.Function('f') pytest.raises(ValueError, fac.stratify, f(y)) def test_nonlin1(): # Fit an exponential curve, with the exponent stratified by a factor # with a common intercept and multiplicative factor in front of the # exponential x = F.Term('x') fac = F.Factor('f', 
                   'ab')
    f = F.Formula([sympy.exp(fac.stratify(x).mean)]) + F.I
    params = F.getparams(f.mean)
    assert ({str(p) for p in params} ==
            {'_x0', '_x1', '_b0', '_b1'})
    test1 = {'1',
             'exp(_x0*f_a + _x1*f_b)',
             '_b0*f_a*exp(_x0*f_a + _x1*f_b)',
             '_b0*f_b*exp(_x0*f_a + _x1*f_b)'}
    test2 = {'1',
             'exp(_x0*f_a + _x1*f_b)',
             '_b1*f_a*exp(_x0*f_a + _x1*f_b)',
             '_b1*f_b*exp(_x0*f_a + _x1*f_b)'}
    # The coefficient label depends on term ordering, so accept either
    # labeling of the design expressions.
    assert {str(t) for t in f.design_expr} in (test1, test2)
    n = F.make_recarray([(2,3,'a'),(4,5,'b'),(5,6,'a')], 'xyf', ['d','d','S1'])
    p = F.make_recarray([1,2,3,4], ['_x0', '_x1', '_b0', '_b1'])
    A = f.design(n, p)
    print(A, A.dtype)


def test_intercept():
    dz = F.make_recarray([2,3,4],'z')
    v = F.I.design(dz, return_float=False)
    assert v.dtype.names == ('intercept',)


def test_nonlin2():
    dz = F.make_recarray([2,3,4],'z')
    z = F.Term('z')
    t = sympy.Symbol('th')
    p = F.make_recarray([3], ['tt'])
    f = F.Formula([sympy.exp(t*z)])
    pytest.raises(ValueError, f.design, dz, p)


def test_Rintercept():
    x = F.Term('x')
    y = F.Term('y')
    xf = x.formula
    yf = y.formula
    newf = (xf+F.I)*(yf+F.I)
    assert set(newf.terms) == {x,y,x*y,sympy.Number(1)}


def test_return_float():
    x = F.Term('x')
    f = F.Formula([x,x**2])
    xx= F.make_recarray(np.linspace(0,10,11), 'x')
    dtype = f.design(xx).dtype
    assert set(dtype.names) == {'x', 'x**2'}
    dtype = f.design(xx, return_float=True).dtype
    assert dtype == np.float64


def test_subtract():
    x, y, z = (F.Term(l) for l in 'xyz')
    f1 = F.Formula([x,y])
    f2 = F.Formula([x,y,z])
    f3 = f2 - f1
    assert set(f3.terms) == {z}
    f4 = F.Formula([y,z])
    f5 = f1 - f4
    assert set(f5.terms) == {x}


def test_subs():
    t1 = F.Term("x")
    t2 = F.Term('y')
    z = F.Term('z')
    f = F.Formula([t1, t2])
    g = f.subs(t1, z)
    assert list(g.terms) == [z, t2]


def test_natural_spline():
    xt=F.Term('x')
    ns=F.natural_spline(xt, knots=[2,6,9])
    xx= F.make_recarray(np.linspace(0,10,101), 'x')
    dd=ns.design(xx, return_float=True)
    xx = xx['x']
    assert_almost_equal(dd[:,0], xx)
    assert_almost_equal(dd[:,1], xx**2)
    assert_almost_equal(dd[:,2], xx**3)
    assert_almost_equal(dd[:,3], (xx-2)**3*np.greater_equal(xx,2))
    assert_almost_equal(dd[:,4], (xx-6)**3*np.greater_equal(xx,6))
    assert_almost_equal(dd[:,5], (xx-9)**3*np.greater_equal(xx,9))
    ns=F.natural_spline(xt, knots=[2,9,6], intercept=True)
    xx= F.make_recarray(np.linspace(0,10,101), 'x')
    dd=ns.design(xx, return_float=True)
    xx = xx['x']
    assert_almost_equal(dd[:,0], 1)
    assert_almost_equal(dd[:,1], xx)
    assert_almost_equal(dd[:,2], xx**2)
    assert_almost_equal(dd[:,3], xx**3)
    assert_almost_equal(dd[:,4], (xx-2)**3*np.greater_equal(xx,2))
    assert_almost_equal(dd[:,5], (xx-9)**3*np.greater_equal(xx,9))
    assert_almost_equal(dd[:,6], (xx-6)**3*np.greater_equal(xx,6))


def test_factor_term():
    # Test that byte strings, unicode strings and objects convert correctly
    for nt in 'S3', 'U3', 'O':
        ndt = np.dtype(nt)
        for lt in 'S3', 'U3', 'O':
            ldt = np.dtype(lt)
            name = np.array('foo', ndt).item()
            level = np.array('bar', ldt).item()
            ft = F.FactorTerm(name, level)
            assert str(ft) == 'foo_bar'
nipy-0.6.1/nipy/algorithms/statistics/histogram.pyx000066400000000000000000000016301470056100100225500ustar00rootroot00000000000000# -*- Mode: Python -*-  Not really, but the syntax is close enough
"""
Author: Alexis Roche, 2012.
"""

import numpy as np

cimport numpy as np

np.import_array()


def histogram(x):
    """ Fast histogram computation assuming input array is of uintp
    data type.
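
    The number of bins is taken from the data themselves, as ``x.max() + 1``
    unit-width bins, so the input is assumed to hold non-negative integer
    codes (hence the ``uintp`` requirement).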
    Parameters
    ----------
    x: array-like
        Assumed with uintp dtype

    Returns
    -------
    h: 1d array
        Histogram
    """
    if not x.dtype=='uintp':
        raise ValueError('input array should have uintp data type')

    cdef np.npy_uintp xv
    cdef np.npy_uintp nbins = x.max() + 1
    cdef np.flatiter it = x.flat
    cdef np.ndarray h = np.zeros(nbins, dtype='uintp')
    cdef np.npy_uintp* hv

    while np.PyArray_ITER_NOTDONE(it):
        xv = (<np.npy_uintp*>np.PyArray_ITER_DATA(it))[0]
        hv = <np.npy_uintp*>np.PyArray_DATA(h) + xv
        hv[0] += 1
        np.PyArray_ITER_NEXT(it)

    return h
nipy-0.6.1/nipy/algorithms/statistics/intvol.pyx000066400000000000000000001072261470056100100220760ustar00rootroot00000000000000"""
The estimators for the intrinsic volumes appearing in this module were
partially supported by NSF grant DMS-0405970.

Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields,
with an application to brain mapping."
Journal of the American Statistical Association, 102(479):913-928.
"""
cimport cython

import numpy as np

cimport numpy as np

from scipy.sparse import dok_matrix

# Array helper
from nipy.utils.arrays import strides_from

# local imports
from .utils import cube_with_strides_center, join_complexes, check_cast_bin8

cdef double PI = np.pi

cdef extern from "math.h" nogil:
    double floor(double x)
    double sqrt(double x)
    double fabs(double x)
    double log2(double x)
    double acos(double x)
    bint isnan(double x)


cpdef double mu3_tet(double D00, double D01, double D02, double D03,
                     double D11, double D12, double D13,
                     double D22, double D23,
                     double D33) nogil:
    """ Compute the 3rd intrinsic volume of a tetrahedron.

    3rd intrinsic volume (just volume in this case) of a tetrahedron with
    coordinates implied by dot products below.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second
        vertex.
    D02 : float
        ``cv0.dot(cv2)``
    D03 : float
        ``cv0.dot(cv3)``
    D11 : float
        ``cv1.dot(cv1)``
    D12 : float
        ``cv1.dot(cv2)``
    D13 : float
        ``cv1.dot(cv3)``
    D22 : float
        ``cv2.dot(cv2)``
    D23 : float
        ``cv2.dot(cv3)``
    D33 : float
        ``cv3.dot(cv3)``

    Returns
    -------
    mu3 : float
        volume of tetrahedron
    """
    cdef double C00, C01, C02, C11, C12, C22, v2
    C00 = D00 - 2*D03 + D33
    C01 = D01 - D13 - D03 + D33
    C02 = D02 - D23 - D03 + D33
    C11 = D11 - 2*D13 + D33
    C12 = D12 - D13 - D23 + D33
    C22 = D22 - 2*D23 + D33
    v2 = (C00 * (C11 * C22 - C12 * C12) -
          C01 * (C01 * C22 - C02 * C12) +
          C02 * (C01 * C12 - C11 * C02))
    # Rounding errors near 0 cause NaNs
    if v2 <= 0:
        return 0
    return sqrt(v2) / 6.


cpdef double mu2_tet(double D00, double D01, double D02, double D03,
                     double D11, double D12, double D13,
                     double D22, double D23,
                     double D33) nogil:
    """ Compute the 2nd intrinsic volume of tetrahedron

    2nd intrinsic volume (half the surface area) of a tetrahedron with
    coordinates implied by dot products below.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second
        vertex.
    D02 : float
        ``cv0.dot(cv2)``
    D03 : float
        ``cv0.dot(cv3)``
    D11 : float
        ``cv1.dot(cv1)``
    D12 : float
        ``cv1.dot(cv2)``
    D13 : float
        ``cv1.dot(cv3)``
    D22 : float
        ``cv2.dot(cv2)``
    D23 : float
        ``cv2.dot(cv3)``
    D33 : float
        ``cv3.dot(cv3)``

    Returns
    -------
    mu2 : float
        Half tetrahedron surface area
    """
    cdef double mu = 0
    mu += mu2_tri(D00, D01, D02, D11, D12, D22)
    mu += mu2_tri(D00, D02, D03, D22, D23, D33)
    mu += mu2_tri(D11, D12, D13, D22, D23, D33)
    mu += mu2_tri(D00, D01, D03, D11, D13, D33)
    return mu * 0.5


cpdef double mu1_tet(double D00, double D01, double D02, double D03,
                     double D11, double D12, double D13,
                     double D22, double D23,
                     double D33) nogil:
    """ Return 1st intrinsic volume of tetrahedron

    Compute the 1st intrinsic volume (sum of external angles * edge
    lengths) of a tetrahedron for which the input arguments represent the
    coordinate dot products of the vertices.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second
        vertex.
    D02 : float
        ``cv0.dot(cv2)``
    D03 : float
        ``cv0.dot(cv3)``
    D11 : float
        ``cv1.dot(cv1)``
    D12 : float
        ``cv1.dot(cv2)``
    D13 : float
        ``cv1.dot(cv3)``
    D22 : float
        ``cv2.dot(cv2)``
    D23 : float
        ``cv2.dot(cv3)``
    D33 : float
        ``cv3.dot(cv3)``

    Returns
    -------
    mu1 : float
        1st intrinsic volume of tetrahedron
    """
    cdef double mu
    mu = 0
    mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33)
    mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33)
    mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22)
    mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33)
    mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22)
    mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11)
    return mu


cdef inline double limited_acos(double val) nogil:
    """ Check for -1 <= val <= 1 before returning acos(val)

    Avoids nan values from small rounding errors
    """
    if val >= 1:
        return 0
    elif val <= -1:
        return PI
    return acos(val)


@cython.cdivision(True)
cpdef double _mu1_tetface(double Ds0s0,
                          double Ds0s1,
                          double Ds1s1,
                          double Ds0t0,
                          double Ds0t1,
                          double Ds1t0,
                          double Ds1t1,
                          double Dt0t0,
                          double Dt0t1,
                          double Dt1t1) nogil:
    cdef double A00, A01, A02, A11, A12, A22, np_len, a, acosval
    cdef double length, norm_proj0, norm_proj1, inner_prod_proj
    A00 = Ds1s1 - 2 * Ds0s1 + Ds0s0
    # all norms are divided by this value; values <= 0 would otherwise
    # propagate NaNs to the output
    if A00 <= 0:
        return 0
    A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0
    A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0
    A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0
    A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0
    A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0
    length = sqrt(A00)
    norm_proj0 = A11 - A01 * A01 / A00
    norm_proj1 = A22 - A02 * A02 / A00
    inner_prod_proj = A12 - A01 * A02 / A00
    np_len = norm_proj0 * norm_proj1
    if np_len <= 0: # would otherwise lead to NaN return value
        return 0
    # hedge for small rounding errors above 1 and below -1
    acosval = limited_acos(inner_prod_proj / sqrt(np_len))
    a = (PI - acosval) * length / (2 * PI)
    return a


cpdef double mu2_tri(double D00, double D01, double D02,
                     double D11, double D12,
                     double D22) nogil:
    """ Compute the 2nd intrinsic volume of triangle

    2nd intrinsic volume (just area in this case) of a triangle with
    coordinates implied by the dot products below.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second
        vertex.
    D02 : float
        ``cv0.dot(cv2)``
    D11 : float
        ``cv1.dot(cv1)``
    D12 : float
        ``cv1.dot(cv2)``
    D22 : float
        ``cv2.dot(cv2)``

    Returns
    -------
    mu2 : float
        area of triangle
    """
    cdef double C00, C01, C11, L
    C00 = D11 - 2*D01 + D00
    C01 = D12 - D01 - D02 + D00
    C11 = D22 - 2*D02 + D00
    L = C00 * C11 - C01 * C01
    # Negative area appeared to result from floating point errors on PPC
    if L < 0:
        return 0.0
    return sqrt(L) * 0.5


cpdef double mu1_tri(double D00, double D01, double D02,
                     double D11, double D12,
                     double D22) nogil:
    """ Compute the 1st intrinsic volume of triangle

    1st intrinsic volume (1/2 the perimeter) of a triangle with coordinates
    implied by the dot products below.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second
        vertex.
    D02 : float
        ``cv0.dot(cv2)``
    D11 : float
        ``cv1.dot(cv1)``
    D12 : float
        ``cv1.dot(cv2)``
    D22 : float
        ``cv2.dot(cv2)``

    Returns
    -------
    mu1 : float
        1/2 perimeter of triangle
    """
    cdef double mu = 0
    mu += mu1_edge(D00, D01, D11)
    mu += mu1_edge(D00, D02, D22)
    mu += mu1_edge(D11, D12, D22)
    return mu * 0.5


cpdef double mu1_edge(double D00, double D01, double D11) nogil:
    """ Compute the 1st intrinsic volume (length) of line segment

    Length of a line segment with vertex coordinates implied by dot products
    below.

    Parameters
    ----------
    D00 : float
        If ``cv0`` is a 3-vector of coordinates for the line start, `D00` is
        ``cv0.dot(cv0)``
    D01 : float
        ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the line end.
    D11 : float
        ``cv1.dot(cv1)``

    Returns
    -------
    mu1 : float
        length of line segment
    """
    return sqrt(D00 - 2*D01 + D11)


def EC3d(mask):
    """ Compute Euler characteristic of region within `mask`

    Given a 3d `mask`, compute the 0th intrinsic volume (Euler
    characteristic) of the masked region. The region is broken up into
    tetrahedra / triangles / edges / vertices, which are included based on
    whether all voxels in the tetrahedron / triangle / edge / vertex are in
    the mask or not.

    Parameters
    ----------
    mask : ndarray shape (i,j,k)
        Binary mask determining whether or not a voxel is in the mask.

    Returns
    -------
    mu0 : int
        Euler characteristic

    Notes
    -----
    We check whether `mask` is binary.

    The 3d cubes are triangulated into 6 tetrahedra of equal volume, as
    described in the reference below.

    Raises
    ------
    ValueError
        If any value in the mask is outside {0, 1}

    References
    ----------
    Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random
    fields, with an application to brain mapping." Journal of the American
    Statistical Association, 102(479):913-928.
    """
    cdef:
        # 'flattened' mask (1d array)
        np.ndarray[np.uint8_t, ndim=1] fpmask
        # d3 and d4 are lists of triangles and tetrahedra
        # associated to particular voxels in the cube
        np.ndarray[np.intp_t, ndim=2] d2
        np.ndarray[np.intp_t, ndim=2] d3
        np.ndarray[np.intp_t, ndim=2] d4
        # scalars
        np.uint8_t m
        np.npy_intp i, j, k, l, s0, s1, s2, ds2, ds3, ds4, index, nvox
        np.npy_intp ss0, ss1, ss2 # strides
        np.ndarray[np.intp_t, ndim=1] strides
        np.npy_intp v0, v1, v2, v3 # vertices
        np.npy_intp l0 = 0

    pmask_shape = np.array(mask.shape) + 1
    s0, s1, s2 = pmask_shape[:3]
    pmask = np.zeros(pmask_shape, dtype=np.uint8)
    pmask[:-1, :-1, :-1] = check_cast_bin8(mask)
    fpmask = pmask.reshape(-1)

    strides = np.array(strides_from(pmask_shape, np.bool_), dtype=np.intp)
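    # The Euler characteristic is assembled by inclusion-exclusion over the
    # simplicial complex: the loops below subtract tetrahedra (d4), add
    # triangles (d3) and subtract edges (d2); the vertex count is added at
    # the end via fpmask.sum(), giving chi = #V - #E + #F - #T.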
# We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel union = join_complexes(*[cube_with_strides_center((0,0,1), strides), cube_with_strides_center((0,1,0), strides), cube_with_strides_center((0,1,1), strides), cube_with_strides_center((1,0,0), strides), cube_with_strides_center((1,0,1), strides), cube_with_strides_center((1,1,0), strides), cube_with_strides_center((1,1,1), strides)]) c = cube_with_strides_center((0,0,0), strides) d4 = np.array(list(c[4].difference(union[4]))) d3 = np.array(list(c[3].difference(union[3]))) d2 = np.array(list(c[2].difference(union[2]))) ds2 = d2.shape[0] ds3 = d3.shape[0] ds4 = d4.shape[0] ss0 = strides[0] ss1 = strides[1] ss2 = strides[2] nvox = mask.size for i in range(s0-1): for j in range(s1-1): for k in range(s2-1): index = i*ss0+j*ss1+k*ss2 for l in range(ds4): v0 = index + d4[l,0] m = fpmask[v0] if m: v1 = index + d4[l,1] v2 = index + d4[l,2] v3 = index + d4[l,3] m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] l0 = l0 - m for l in range(ds3): v0 = index + d3[l,0] m = fpmask[v0] if m: v1 = index + d3[l,1] v2 = index + d3[l,2] m = m * fpmask[v1] * fpmask[v2] l0 = l0 + m for l in range(ds2): v0 = index + d2[l,0] m = fpmask[v0] if m: v1 = index + d2[l,1] m = m * fpmask[v1] l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype return l0 + fpmask.sum().astype(int) def Lips3d(coords, mask): """ Estimated intrinsic volumes within masked region given coordinates Given a 3d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into tetrahedra / triangles / edges / vertices, which are included based on whether all voxels in the tetrahedron / triangle / edge / vertex are in the mask or not. Parameters ---------- coords : ndarray shape (N, i, j, k) Coordinates for the voxels in the mask. ``N`` will often be 3 (for 3 dimensional coordinates), but can be any integer > 0 mask : ndarray shape (i, j, k) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1, mu2, mu3], being, respectively: #. Euler characteristic #. 2 * mean caliper diameter #. 0.5 * surface area #. Volume. Notes ----- We check whether `mask` is binary. The 3d cubes are triangulated into 6 tetrahedra of equal volume, as described in the reference below. Raises ------ ValueError If any value in the mask is outside {0, 1} References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. 
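Examples -------- A minimal sketch, assuming this extension builds to the module ``nipy.algorithms.statistics.intvol`` (as set up in the accompanying meson.build): >>> import numpy as np >>> from nipy.algorithms.statistics.intvol import Lips3d >>> mask = np.ones((8, 8, 8), dtype=np.uint8) >>> coords = np.indices(mask.shape).astype(np.float64) >>> mu0, mu1, mu2, mu3 = Lips3d(coords, mask) With these voxel-index coordinates the mask covers a solid 7 x 7 x 7 box, so ``mu0`` (the Euler characteristic) should be 1 and ``mu3`` the box volume, 343.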
""" if mask.shape != coords.shape[1:]: raise ValueError('shape of mask does not match coordinates') # if the data can be squeezed, we must use the lower dimensional function mask = np.squeeze(mask) if mask.ndim < 3: value = np.zeros(4) coords = coords.reshape((coords.shape[0],) + mask.shape) if mask.ndim == 2: value[:3] = Lips2d(coords, mask) elif mask.ndim == 1: value[:2] = Lips1d(coords, mask) return value cdef: # c-level versions of the arrays # 'flattened' coords (2d array) np.ndarray[np.float_t, ndim=2] fcoords np.ndarray[np.float_t, ndim=2] D # 'flattened' mask (1d array) np.ndarray[np.uint8_t, ndim=1] fmask np.ndarray[np.uint8_t, ndim=1] fpmask # d3 and d4 are lists of triangles and tetrahedra # associated to particular voxels in the cube np.ndarray[np.intp_t, ndim=2] d4 np.ndarray[np.intp_t, ndim=2] m4 np.ndarray[np.intp_t, ndim=2] d3 np.ndarray[np.intp_t, ndim=2] m3 np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] m2 np.ndarray[np.intp_t, ndim=1] cvertices # scalars np.uint8_t m, mr, ms np.npy_intp i, j, k, l, s0, s1, s2, ds4, ds3, ds2 np.npy_intp index, pindex, nvox, r, s, rr, ss np.npy_intp ss0, ss1, ss2 # strides np.npy_intp ss0d, ss1d, ss2d # strides np.npy_intp v0, v1, v2, v3 # vertices for mask np.npy_intp w0, w1, w2, w3 # vertices for data double l0, l1, l2, l3 double res coords = coords.astype(np.float64) mask = check_cast_bin8(mask) l0 = 0; l1 = 0; l2 = 0; l3 = 0 pmask_shape = np.array(mask.shape) + 1 s0, s1, s2 = pmask_shape[:3] pmask = np.zeros(pmask_shape, np.uint8) pmask[:-1, :-1, :-1] = mask fpmask = pmask.reshape(-1) fmask = mask.reshape(-1).astype(np.uint8) fcoords = coords.reshape((coords.shape[0], -1)) # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel # The mask is copied into a larger array, hence it will have different # strides than the data cdef: np.ndarray[np.intp_t, ndim=1] strides np.ndarray[np.intp_t, ndim=1] dstrides strides = np.array(strides_from(pmask_shape, np.bool_), dtype=np.intp) dstrides = np.array(strides_from(mask.shape, np.bool_), dtype=np.intp) ss0, ss1, ss2 = strides[0], strides[1], strides[2] ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] verts = [] for i in range(2): for j in range(2): for k in range(2): verts.append(ss0d * i + ss1d * j + ss2d * k) cvertices = np.array(sorted(verts), np.intp) union = join_complexes(*[cube_with_strides_center((0,0,1), strides), cube_with_strides_center((0,1,0), strides), cube_with_strides_center((0,1,1), strides), cube_with_strides_center((1,0,0), strides), cube_with_strides_center((1,0,1), strides), cube_with_strides_center((1,1,0), strides), cube_with_strides_center((1,1,1), strides)]) c = cube_with_strides_center((0,0,0), strides) m4 = np.array(list(c[4].difference(union[4]))) m3 = np.array(list(c[3].difference(union[3]))) m2 = np.array(list(c[2].difference(union[2]))) d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) d4 = np.hstack([m4, d4]) ds4 = d4.shape[0] d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) d3 = np.hstack([m3, d3]) ds3 = d3.shape[0] d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) d2 = np.hstack([m2, d2]) ds2 = d2.shape[0] nvox = mask.size D = np.zeros((8,8)) for i in range(s0-1): for j in range(s1-1): for k in range(s2-1): pindex = i*ss0+j*ss1+k*ss2 index = i*ss0d+j*ss1d+k*ss2d for r in range(8): rr = 
(index+cvertices[r]) % nvox mr = fmask[rr] for s in range(r+1): res = 0 ss = (index+cvertices[s]) % nvox ms = fmask[ss] if mr * ms: for l in range(fcoords.shape[0]): res += fcoords[l,ss] * fcoords[l,rr] D[r,s] = res D[s,r] = res else: D[r,s] = 0 D[s,r] = 0 for l in range(ds4): v0 = pindex + d4[l,0] w0 = d4[l,4] m = fpmask[v0] if m: v1 = pindex + d4[l,1] v2 = pindex + d4[l,2] v3 = pindex + d4[l,3] w1 = d4[l,5] w2 = d4[l,6] w3 = d4[l,7] m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l0 = l0 - m for l in range(ds3): v0 = pindex + d3[l,0] w0 = d3[l,3] m = fpmask[v0] if m: v1 = pindex + d3[l,1] v2 = pindex + d3[l,2] w1 = d3[l,4] w2 = d3[l,5] m = m * fpmask[v1] * fpmask[v2] l2 = l2 + m * mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) l1 = l1 - m * mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) l0 = l0 + m for l in range(ds2): v0 = pindex + d2[l,0] w0 = d2[l,2] m = fpmask[v0] if m: v1 = pindex + d2[l,1] w1 = d2[l,3] m = m * fpmask[v1] l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype l0 += fpmask.sum().astype(int) return np.array([l0, l1, l2, l3]) def _convert_stride3(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] v -= v0 * stride1[0] v1 = v // stride1[1] v2 = v - v1 * stride1[1] return v0*stride2[0] + v1*stride2[1] + v2*stride2[2] def _convert_stride2(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] v1 = v - v0 * stride1[0] return v0*stride2[0] + v1*stride2[1] def _convert_stride1(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] return v0 * stride2[0] def Lips2d(coords, mask): """ Estimate intrinsic volumes for 2d region in `mask` given `coords` Given a 2d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into triangles / edges / vertices, which are included based on whether all voxels in the triangle / edge / vertex are in the mask or not. Parameters ---------- coords : ndarray shape (N, i, j) Coordinates for the voxels in the mask. ``N`` will often be 2 (for 2 dimensional coordinates), but can be any integer > 0 mask : ndarray shape (i, j) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1, mu2], being, respectively: #. Euler characteristic #. 2 * mean caliper diameter #. Area. Notes ----- We check whether `mask` is binary. Raises ------ ValueError If any value in the mask is outside {0, 1} References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. 
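Examples -------- A minimal sketch, assuming the module is importable as ``nipy.algorithms.statistics.intvol``: >>> import numpy as np >>> from nipy.algorithms.statistics.intvol import Lips2d >>> mask = np.ones((5, 5), dtype=np.uint8) >>> coords = np.indices(mask.shape).astype(np.float64) >>> mu0, mu1, mu2 = Lips2d(coords, mask) Here the mask covers a 4 x 4 square, so ``mu0`` should be 1, ``mu1`` half the perimeter (8), and ``mu2`` the area (16).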
""" if mask.shape != coords.shape[1:]: raise ValueError('shape of mask does not match coordinates') # if the data can be squeezed, we must use the lower dimensional function if mask.ndim == 1: value = np.zeros(3) coords = coords.reshape((coords.shape[0],) + mask.shape) value[:2] = Lips1d(coords, mask) return value cdef: # c-level versions of the arrays # 'flattened' coords (2d array) np.ndarray[np.float_t, ndim=2] fcoords np.ndarray[np.float_t, ndim=2] D # 'flattened' mask (1d array) np.ndarray[np.uint8_t, ndim=1] fmask np.ndarray[np.uint8_t, ndim=1] fpmask # d2 and d3 are lists of triangles associated to particular voxels in # the square np.ndarray[np.intp_t, ndim=2] d3 np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=1] cvertices # scalars np.npy_uint8 m, mr, ms np.npy_intp i, j, k, l, r, s, rr, ss, s0, s1 np.npy_intp ds2, ds3, index, npix, pindex np.npy_intp ss0, ss1, ss0d, ss1d # strides np.npy_intp v0, v1, v2 # vertices np.npy_intp w0, w1, w2 double l0, l1, l2 double res coords = coords.astype(np.float64) mask = check_cast_bin8(mask) l0 = 0; l1 = 0; l2 = 0 pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.uint8) pmask[:-1, :-1] = mask s0, s1 = pmask.shape[:2] fpmask = pmask.reshape(-1) fmask = mask.reshape(-1).astype(np.uint8) fcoords = coords.reshape((coords.shape[0], -1)) # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel # The mask is copied into a larger array, hence it will have different # strides than the data cdef: np.ndarray[np.intp_t, ndim=1] strides np.ndarray[np.intp_t, ndim=1] dstrides strides = np.array(strides_from(pmask_shape, np.bool_), dtype=np.intp) dstrides = np.array(strides_from(mask.shape, np.bool_), dtype=np.intp) ss0, ss1 = strides[0], strides[1] ss0d, ss1d = dstrides[0], dstrides[1] verts = [] for i in range(2): for j in range(2): verts.append(ss0d * i + ss1d * j) cvertices = np.array(sorted(verts), np.intp) union = join_complexes(*[cube_with_strides_center((0,1), strides), cube_with_strides_center((1,0), strides), cube_with_strides_center((1,1), strides)]) c = cube_with_strides_center((0,0), strides) m3 = np.array(list(c[3].difference(union[3]))) m2 = np.array(list(c[2].difference(union[2]))) d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) d3 = np.hstack([m3, d3]) ds3 = d3.shape[0] d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) d2 = np.hstack([m2, d2]) ds2 = d2.shape[0] D = np.zeros((4,4)) npix = mask.size for i in range(s0-1): for j in range(s1-1): pindex = i*ss0+j*ss1 index = i*ss0d+j*ss1d for r in range(4): rr = (index+cvertices[r]) % npix mr = fmask[rr] for s in range(r+1): res = 0 ss = (index+cvertices[s]) % npix ms = fmask[ss] if mr * ms: for l in range(fcoords.shape[0]): res += fcoords[l,ss] * fcoords[l,rr] D[r, s] = res D[s, r] = res else: D[r, s] = 0 D[s, r] = 0 for l in range(ds3): v0 = pindex + d3[l,0] w0 = d3[l,3] m = fpmask[v0] if m: v1 = pindex + d3[l,1] v2 = pindex + d3[l,2] w1 = d3[l,4] w2 = d3[l,5] m = m * fpmask[v1] * fpmask[v2] l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) * m l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) * m l0 = l0 + m for l in range(ds2): v0 = pindex + d2[l,0] w0 = d2[l,2] m = fpmask[v0] if m: v1 = pindex + d2[l,1] w1 = d2[l,3] m = m * fpmask[v1] l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) l0 = l0 - m # fpmask has 
the same sum as mask, but with predictable dtype l0 += fpmask.sum().astype(int) return np.array([l0,l1,l2]) def EC2d(mask): """ Compute Euler characteristic of 2D region in `mask` Given a 2d `mask`, compute the 0th intrinsic volume (Euler characteristic) of the masked region. The region is broken up into triangles / edges / vertices, which are included based on whether all voxels in the triangle / edge / vertex are in the mask or not. Parameters ---------- mask : ndarray shape (i, j) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu0 : int Euler characteristic Notes ----- We check whether `mask` is binary. Raises ------ ValueError If any value in the mask is outside {0, 1} References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ cdef: # c-level versions of the array # 'flattened' mask (1d array) np.ndarray[np.uint8_t, ndim=1] fpmask # d2 and d3 are lists of edges and triangles # associated to particular voxels in the square np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] d3 # scalars np.uint64_t m np.npy_intp i, j, k, l, s0, s1, ds2, ds3, index np.ndarray[np.intp_t, ndim=1] strides np.npy_intp ss0, ss1 # strides np.npy_intp v0, v1 # vertices np.npy_intp l0 = 0 mask = check_cast_bin8(mask) pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.uint8) pmask[:-1, :-1] = mask s0, s1 = pmask.shape[:2] fpmask = pmask.reshape(-1) strides = np.array(strides_from(pmask_shape, np.bool_), dtype=np.intp) ss0, ss1 = strides[0], strides[1] # First do the interior contributions. # We first figure out which vertices, edges, triangles # are uniquely associated with an interior voxel union = join_complexes(*[cube_with_strides_center((0,1), strides), cube_with_strides_center((1,0), strides), cube_with_strides_center((1,1), strides)]) c = cube_with_strides_center((0,0), strides) d3 = np.array(list(c[3].difference(union[3]))) d2 = np.array(list(c[2].difference(union[2]))) ds2 = d2.shape[0] ds3 = d3.shape[0] for i in range(s0-1): for j in range(s1-1): index = i*ss0+j*ss1 for l in range(ds3): v0 = index + d3[l,0] m = fpmask[v0] if m: v1 = index + d3[l,1] v2 = index + d3[l,2] m = m * fpmask[v1] * fpmask[v2] l0 = l0 + m for l in range(ds2): v0 = index + d2[l,0] m = fpmask[v0] if m: v1 = index + d2[l,1] m = m * fpmask[v1] l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype l0 += fpmask.sum().astype(int) return l0 def Lips1d(coords, mask): """ Estimate intrinsic volumes for 1D region in `mask` given `coords` Given a 1d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into edges / vertices, which are included based on whether all voxels in the edge / vertex are in the mask or not. Parameters ---------- coords : ndarray shape (N, i) Coordinates for the voxels in the mask. ``N`` will often be 1 (for 1 dimensional coordinates), but can be any integer > 0 mask : ndarray shape (i,) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1], being, respectively: #. Euler characteristic #. Line segment length Notes ----- We check whether `mask` is binary. Raises ------ ValueError If any value in the mask is outside {0, 1} References ---------- Taylor, J.E. & Worsley, K.J. (2007).
"Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ if mask.shape[0] != coords.shape[1]: raise ValueError('shape of mask does not match coordinates') cdef: np.ndarray[np.uint8_t, ndim=1] mask_c np.ndarray[np.float_t, ndim=2] coords_c np.uint8_t m, mr, ms np.npy_intp i, l, r, s, rr, ss, s0, index double l0, l1 double res coords_c = coords.astype(np.float64) mask_c = check_cast_bin8(mask) l0 = 0; l1 = 0 s0 = mask_c.shape[0] D = np.zeros((2,2)) for i in range(s0): for r in range(2): rr = (i+r) % s0 mr = mask_c[rr] for s in range(r+1): res = 0 ss = (i+s) % s0 ms = mask_c[ss] if mr * ms * ((i+r) < s0) * ((i+s) < s0): for l in range(coords_c.shape[0]): res += coords_c[l,ss] * coords_c[l,rr] D[r,s] = res D[s,r] = res else: D[r,s] = 0 D[s,r] = 0 m = mask_c[i] if m: m = m * (mask_c[(i+1) % s0] * ((i+1) < s0)) l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) l0 = l0 - m # mask_c has the same sum as mask, but with predictable dtype l0 += mask_c.sum().astype(int) return np.array([l0, l1]) def EC1d(mask): """ Compute Euler characteristic for 1d `mask` Given a 1d mask `mask`, compute the 0th intrinsic volume (Euler characteristic) of the masked region. The region is broken up into edges / vertices, which are included based on whether all voxels in the edge / vertex are in the mask or not. Parameters ---------- mask : ndarray shape (i,) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu0 : int Euler characteristic Notes ----- We check whether the array mask is binary. The 3d cubes are triangulated into 6 tetrahedra of equal volume, as described in the reference below. Raises ------ ValueError If any value in the mask is outside {0, 1} References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ cdef: np.ndarray[np.uint8_t, ndim=1] mask_c np.uint8_t m np.npy_intp i, s0 double l0 = 0 mask_c = check_cast_bin8(mask) s0 = mask_c.shape[0] for i in range(s0): m = mask_c[i] if m: m = m * (mask_c[(i+1) % s0] * ((i+1) < s0)) l0 = l0 - m # mask_c has the same sum as mask, but with predictable dtype l0 += mask_c.sum().astype(int) return l0 nipy-0.6.1/nipy/algorithms/statistics/meson.build000066400000000000000000000016531470056100100221600ustar00rootroot00000000000000target_dir = 'nipy/algorithms/statistics' extensions = [ 'intvol', 'histogram' ] foreach ext: extensions py.extension_module(ext, cython_gen.process(ext + '.pyx'), c_args: cython_c_args, include_directories: [incdir_numpy], install: true, subdir: target_dir ) endforeach py.extension_module('_quantile', [ cython_gen.process('_quantile.pyx'), 'quantile.c', ], c_args: cython_c_args, include_directories: ['.', incdir_numpy], install: true, subdir: target_dir ) python_sources = [ '__init__.py', 'api.py', 'bayesian_mixed_effects.py', 'empirical_pvalue.py', 'mixed_effects_stat.py', 'onesample.py', 'rft.py', 'utils.py' ] py.install_sources( python_sources, pure: false, subdir: target_dir ) pure_subdirs = [ 'bench', 'formula', 'models', 'tests' ] foreach subdir: pure_subdirs install_subdir(subdir, install_dir: install_root / target_dir) endforeach nipy-0.6.1/nipy/algorithms/statistics/mixed_effects_stat.py000066400000000000000000000301411470056100100242220ustar00rootroot00000000000000""" Module for computation of mixed effects statistics with an EM algorithm. i.e. 
solves problems of the form y = X beta + e1 + e2, where X and Y are known, e1 and e2 are centered with diagonal covariance. V1 = var(e1) is known, and V2 = var(e2) = lambda identity. the code estimates beta and lambda using an EM algorithm. Likelihood ratio tests can then be used to test the columns of beta. Author: Bertrand Thirion, 2012. >>> N, P = 15, 500 >>> V1 = np.random.randn(N, P) ** 2 >>> effects = np.ones(P) >>> Y = generate_data(np.ones(N), effects, .25, V1) >>> T1 = one_sample_ttest(Y, V1, n_iter=5) >>> T2 = t_stat(Y) >>> assert(T1.std() < T2.std()) """ import numpy as np EPS = 100 * np.finfo(float).eps def generate_data(X, beta, V2, V1): """ Generate a group of individuals from the provided parameters Parameters ---------- X: array of shape (n_samples, n_reg), the design matrix of the model beta: float or array of shape (n_reg, n_tests), the associated effects V2: float or array of shape (n_tests), group variance V1: array of shape(n_samples, n_tests), the individual variances Returns ------- Y: array of shape(n_samples, n_tests) the individual data related to the two-level normal model """ # check that the variances are positive if (V1 < 0).any(): raise ValueError('Variance should be positive') Y = np.random.randn(*V1.shape) Y *= np.sqrt(V2 + V1) if X.ndim == 1: X = X[:, np.newaxis] if np.isscalar(beta): beta = beta * np.ones((X.shape[1], V1.shape[1])) if beta.ndim == 1: beta = beta[np.newaxis] Y += np.dot(X, beta) return Y def check_arrays(Y, V1): """Check that the given data can be used for the models Parameters ---------- Y: array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1: array of shape (n_samples, n_tests) or (n_samples) first-level variance """ if (V1 < 0).any(): raise ValueError("a negative variance has been provided") if np.size(Y) == Y.shape[0]: Y = Y[:, np.newaxis] if np.size(V1) == V1.shape[0]: V1 = V1[:, np.newaxis] if Y.shape != V1.shape: raise ValueError("Y and V1 do not have the same shape") return Y, V1 def t_stat(Y): """ Returns the t stat of the sample on each row of the matrix Parameters ---------- Y, array of shape (n_samples, n_tests) Returns ------- t_variates, array of shape (n_tests) """ return Y.mean(0) / Y.std(0) * np.sqrt(Y.shape[0] - 1) class MixedEffectsModel: """Class to handle multiple one-sample mixed effects models """ def __init__(self, X, n_iter=5, verbose=False): """ Set the effects and first-level variance, and initialize related quantities Parameters ---------- X: array of shape(n_samples, n_effects), the design matrix n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode """ self.n_iter = n_iter self.verbose = verbose self.X = X self.pinv_X = np.linalg.pinv(X) def log_like(self, Y, V1): """ Compute the log-likelihood of (Y, V1) under the model Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance Returns ------- logl: array of shape self.n_tests, the log-likelihood of the model """ Y, V1 = check_arrays(Y, V1) tvar = self.V2 + V1 logl = np.sum(((Y - self.Y_) ** 2) / tvar, 0) logl += np.sum(np.log(tvar), 0) logl += np.log(2 * np.pi) * Y.shape[0] logl *= (- 0.5) return logl def predict(self, Y, V1): """Return the log_likelihood of the data.See the log_like method""" return self.log_like(Y, V1) def score(self, Y, V1): """Return the log_likelihood of the data. 
See the log_like method""" return self.log_like(Y, V1) def _one_step(self, Y, V1): """Applies one step of an EM algorithm to estimate self.beta_ and self.V2 Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance """ # E step prec = 1. / (self.V2 + V1) Y_ = prec * (self.V2 * Y + V1 * self.Y_) cvar = V1 * self.V2 * prec # M step self.beta_ = np.dot(self.pinv_X, Y_) self.Y_ = np.dot(self.X, self.beta_) self.V2 = np.mean((Y_ - self.Y_) ** 2, 0) + cvar.mean(0) def fit(self, Y, V1): """ Launches the EM algorithm to estimate self Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance Returns ------- self """ # Basic data checks if self.X.shape[0] != Y.shape[0]: raise ValueError('X and Y must have the same numbers of rows') Y, V1 = check_arrays(Y, V1) self.beta_ = np.dot(self.pinv_X, Y) self.Y_ = np.dot(self.X, self.beta_) self.V2 = np.mean((Y - self.Y_) ** 2, 0) if self.verbose: log_like_init = self.log_like(Y, V1) print('Average log-likelihood: ', log_like_init.mean()) for i in range(self.n_iter): self._one_step(Y, V1) if self.verbose: log_like_ = self.log_like(Y, V1) if (log_like_ < (log_like_init - EPS)).any(): raise ValueError('The log-likelihood cannot decrease') log_like_init = log_like_ print('Iteration %d, average log-likelihood: %f' % ( i, log_like_.mean())) return self def two_sample_ftest(Y, V1, group, n_iter=5, verbose=False): """Returns the mixed effects F-stat for each column of `Y` (two-sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance associated with the data group: array of shape (n_samples) a vector of indicators yielding the samples membership n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- fstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ # check that group is correct if group.size != Y.shape[0]: raise ValueError('The number of labels is not the number of samples') if (np.unique(group) != np.array([0, 1])).all(): raise ValueError('group should be composed only of zeros and ones') # create design matrices X = np.vstack((np.ones_like(group), group)).T return mfx_stat(Y, V1, X, 1, n_iter=n_iter, verbose=verbose, return_t=False, return_f=True)[0] def two_sample_ttest(Y, V1, group, n_iter=5, verbose=False): """Returns the mixed effects t-stat for each column of `Y` (two-sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance associated with the data group: array of shape (n_samples) a vector of indicators yielding the samples membership n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- tstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ X = np.vstack((np.ones_like(group), group)).T return mfx_stat(Y, V1, X, 1, n_iter=n_iter, verbose=verbose, return_t=True)[0] def one_sample_ftest(Y, V1, n_iter=5, verbose=False): """Returns the mixed effects F-stat for each column of `Y` (one-sample test) This uses the Formula in Roche et al., NeuroImage
2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance associated with the data n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- fstat, array of shape (n_tests), statistical values obtained from the likelihood ratio test """ return mfx_stat(Y, V1, np.ones((Y.shape[0], 1)), 0, n_iter=n_iter, verbose=verbose, return_t=False, return_f=True)[0] def one_sample_ttest(Y, V1, n_iter=5, verbose=False): """Returns the mixed effects t-stat for each column of `Y` (one-sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the observations V1: array of shape (n_samples, n_tests) first-level variance associated with the observations n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- tstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ return mfx_stat(Y, V1, np.ones((Y.shape[0], 1)), 0, n_iter=n_iter, verbose=verbose, return_t=True)[0] def mfx_stat(Y, V1, X, column, n_iter=5, return_t=True, return_f=False, return_effect=False, return_var=False, verbose=False): """Run a mixed-effects model test on the column of the design matrix Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance associated with the data X: array of shape(n_samples, n_regressors) the design matrix of the model column: int, index of the column of X to be tested n_iter: int, optional, number of iterations of the EM algorithm return_t: bool, optional, should one return the t test (True by default) return_f: bool, optional, should one return the F test (False by default) return_effect: bool, optional, should one return the effect estimate (False by default) return_var: bool, optional, should one return the variance estimate (False by default) verbose: bool, optional, verbosity mode Returns ------- (tstat, fstat, effect, var): tuple of arrays of shape (n_tests), those required by the input return booleans """ # check that X/columns are correct column = int(column) if X.shape[0] != Y.shape[0]: raise ValueError('X.shape[0] is not the number of samples') if column >= X.shape[1]: raise ValueError('the column index is more than the number of columns') # create design matrices contrast_mask = 1 - np.eye(X.shape[1])[column] X0 = X * contrast_mask # instantiate the mixed effects models model_0 = MixedEffectsModel(X0, n_iter=n_iter, verbose=verbose).fit(Y, V1) model_1 = MixedEffectsModel(X, n_iter=n_iter, verbose=verbose).fit(Y, V1) # compute the log-likelihood ratio statistic fstat = 2 * (model_1.log_like(Y, V1) - model_0.log_like(Y, V1)) fstat = np.maximum(0, fstat) sign = np.sign(model_1.beta_[column]) output = () if return_t: output += (np.sqrt(fstat) * sign,) if return_f: output += (fstat,) if return_var: output += (model_1.V2,) if return_effect: output += (model_1.beta_[column],) return output nipy-0.6.1/nipy/algorithms/statistics/models/000077500000000000000000000000001470056100100212745ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/models/LICENSE.txt000066400000000000000000000025521470056100100231230ustar00rootroot00000000000000Copyright (C) 2006, Jonathan E.
Taylor Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nipy-0.6.1/nipy/algorithms/statistics/models/TODO.txt000066400000000000000000000024751470056100100226120ustar00rootroot00000000000000TODO for scipy.stats.models =========================== In converting the bspline.so from a weave build to a C extension, we found several things that should be fixed or looked into more thoroughly. Hopefully we can dedicate some time to this effort at the Scipy Conf 2008. However, many of these items should be addressed before stats.models goes into a release of scipy. Items ----- * Run pychecker on the stats.models and fix numerous errors. There are import errors, undefined globals, undefined attrs, etc... Running the command below in stats/models produced 140+ errors.:: # Run pychecker on all python modules except __init__.py $ grind "[a-z|_][a-z]*.py" | xargs pychecker * Address the FIXME issues in the code. * Determine and cleanup the public API. Functions/classes used internally should be private (leading underscore). Public functions should be obvious and documented. Packaging should be reviewed and cleaned up. * Update documentation to scipy standards. Especially adding example sections showing how to use the public functions. * Tests! Robust tests are needed! Of the subset of tests we looked at, most only checked attribute setting, not the results of applying the function to data. * Remove code duplication. smoothers.py and bspline.py define SmoothingSpline class. nipy-0.6.1/nipy/algorithms/statistics/models/__init__.py000066400000000000000000000003741470056100100234110ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ models - Statistical Models """ __docformat__ = 'restructuredtext' from . 
import glm, model, regression from .info import __doc__ nipy-0.6.1/nipy/algorithms/statistics/models/family/000077500000000000000000000000001470056100100225555ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/models/family/__init__.py000066400000000000000000000007401470056100100246670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' This module contains the one-parameter exponential families used for fitting GLMs and GAMs. These families are described in P. McCullagh and J. A. Nelder. "Generalized linear models." Monographs on Statistics and Applied Probability. Chapman & Hall, London, 1983. ''' from .family import Binomial, Family, Gamma, Gaussian, InverseGaussian, Poisson nipy-0.6.1/nipy/algorithms/statistics/models/family/family.py000066400000000000000000000132671470056100100244170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from . import links as L from . import varfuncs as V class Family: """ A class to model one-parameter exponential families. INPUTS: link -- a Link instance variance -- a variance function (models the variance as a function of the mean) """ valid = [-np.inf, np.inf] tol = 1.0e-05 links = [] def _setlink(self, link): self._link = link if hasattr(self, "links"): if link not in self.links: raise ValueError( f'invalid link for family, should be in {self.links}') def _getlink(self): return self._link link = property(_getlink, _setlink) def __init__(self, link, variance): self.link = link self.variance = variance def weights(self, mu): """ Weights for IRLS step. w = 1 / (link'(mu)**2 * variance(mu)) INPUTS: mu -- mean parameter in exponential family OUTPUTS: w -- weights used in WLS step of GLM/GAM fit """ return 1. / (self.link.deriv(mu)**2 * self.variance(mu)) def deviance(self, Y, mu, scale=1.): """ Deviance of (Y,mu) pair. Deviance is usually defined as the difference DEV = (SUM_i -2 log Likelihood(Y_i,mu_i) + 2 log Likelihood(mu_i,mu_i)) / scale INPUTS: Y -- response variable mu -- mean parameter scale -- optional scale in denominator of deviance OUTPUTS: dev dev -- DEV, as described above """ return np.power(self.devresid(Y, mu), 2).sum() / scale def devresid(self, Y, mu): """ The deviance residuals, defined as the residuals in the deviance. Without knowing the link, they default to Pearson residuals resid_P = (Y - mu) * sqrt(weight(mu)) INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ return (Y - mu) * np.sqrt(self.weights(mu)) def fitted(self, eta): """ Fitted values based on linear predictors eta. INPUTS: eta -- values of linear predictors, say, X beta in a generalized linear model. OUTPUTS: mu mu -- link.inverse(eta), mean parameter based on eta """ return self.link.inverse(eta) def predict(self, mu): """ Linear predictors based on given mu values. INPUTS: mu -- mean parameter of one-parameter exponential family OUTPUTS: eta eta -- link(mu), linear predictors, based on mean parameters mu """ return self.link(mu) class Poisson(Family): """ Poisson exponential family.
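Used for count data; with the canonical (default) log link the fitted mean is ``exp(X beta)``, i.e. a log-linear model.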
INPUTS: link -- a Link instance """ links = [L.log, L.identity, L.sqrt] variance = V.mu valid = [0, np.inf] def __init__(self, link=L.log): self.variance = Poisson.variance self.link = link def devresid(self, Y, mu): """ Poisson deviance residual INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ return np.sign(Y - mu) * np.sqrt(2 * Y * np.log(Y / mu) - 2 * (Y - mu)) class Gaussian(Family): """ Gaussian exponential family. INPUTS: link -- a Link instance """ links = [L.log, L.identity, L.inverse] variance = V.constant def __init__(self, link=L.identity): self.variance = Gaussian.variance self.link = link def devresid(self, Y, mu, scale=1.): """ Gaussian deviance residual INPUTS: Y -- response variable mu -- mean parameter scale -- optional scale in denominator (after taking sqrt) OUTPUTS: resid resid -- deviance residuals """ return (Y - mu) / np.sqrt(self.variance(mu) * scale) class Gamma(Family): """ Gamma exponential family. INPUTS: link -- a Link instance BUGS: no deviance residuals? """ links = [L.log, L.identity, L.inverse] variance = V.mu_squared def __init__(self, link=L.identity): self.variance = Gamma.variance self.link = link class Binomial(Family): """ Binomial exponential family. INPUTS: link -- a Link instance n -- number of trials for Binomial """ links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog] variance = V.binary def __init__(self, link=L.logit, n=1): self.n = n self.variance = V.Binomial(n=self.n) self.link = link def devresid(self, Y, mu): """ Binomial deviance residual INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ mu = self.link.clean(mu) return np.sign(Y - mu) * np.sqrt(-2 * (Y * np.log(mu / self.n) + (self.n - Y) * np.log(1 - mu / self.n))) class InverseGaussian(Family): """ InverseGaussian exponential family. INPUTS: link -- a Link instance """ links = [L.inverse_squared, L.inverse, L.identity, L.log] variance = V.mu_cubed def __init__(self, link=L.identity): self.variance = InverseGaussian.variance self.link = link nipy-0.6.1/nipy/algorithms/statistics/models/family/links.py000066400000000000000000000150531470056100100242530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats class Link: """ A generic link function for one-parameter exponential family, with call, inverse and deriv methods. """ def initialize(self, Y): return np.asarray(Y).mean() * np.ones(Y.shape) def __call__(self, p): raise NotImplementedError def inverse(self, z): raise NotImplementedError def deriv(self, p): raise NotImplementedError class Logit(Link): """ The logit transform as a link function: g'(x) = 1 / (x * (1 - x)) g^(-1)(x) = exp(x)/(1 + exp(x)) """ tol = 1.0e-10 def clean(self, p): """ Clip logistic values to range (tol, 1-tol) INPUTS: p -- probabilities OUTPUTS: pclip pclip -- clipped probabilities """ return np.clip(p, Logit.tol, 1. - Logit.tol) def __call__(self, p): """ Logit transform g(p) = log(p / (1 - p)) INPUTS: p -- probabilities OUTPUTS: z z -- logit transform of p """ p = self.clean(p) return np.log(p / (1. - p)) def inverse(self, z): """ Inverse logit transform h(z) = exp(z)/(1+exp(z)) INPUTS: z -- logit transform of p OUTPUTS: p p -- probabilities """ t = np.exp(z) return t / (1.
+ t) def deriv(self, p): """ Derivative of logit transform g(p) = 1 / (p * (1 - p)) INPUTS: p -- probabilities OUTPUTS: y y -- derivative of logit transform of p """ p = self.clean(p) return 1. / (p * (1 - p)) logit = Logit() class Power(Link): """ The power transform as a link function: g(x) = x**power """ def __init__(self, power=1.): self.power = power def __call__(self, x): """ Power transform g(x) = x**self.power INPUTS: x -- mean parameters OUTPUTS: z z -- power transform of x """ return np.power(x, self.power) def inverse(self, z): """ Inverse of power transform g(x) = x**(1/self.power) INPUTS: z -- linear predictors in GLM OUTPUTS: x x -- mean parameters """ return np.power(z, 1. / self.power) def deriv(self, x): """ Derivative of power transform g(x) = self.power * x**(self.power - 1) INPUTS: x -- mean parameters OUTPUTS: z z -- derivative of power transform of x """ return self.power * np.power(x, self.power - 1) inverse = Power(power=-1.) inverse.__doc__ = """ The inverse transform as a link function: g(x) = 1 / x """ sqrt = Power(power=0.5) sqrt.__doc__ = """ The square-root transform as a link function: g(x) = sqrt(x) """ inverse_squared = Power(power=-2.) inverse_squared.__doc__ = """ The inverse squared transform as a link function: g(x) = 1 / x**2 """ identity = Power(power=1.) identity.__doc__ = """ The identity transform as a link function: g(x) = x """ class Log(Link): """ The log transform as a link function: g(x) = log(x) """ tol = 1.0e-10 def clean(self, x): return np.clip(x, Logit.tol, np.inf) def __call__(self, x, **extra): """ Log transform g(x) = log(x) INPUTS: x -- mean parameters OUTPUTS: z z -- log(x) """ x = self.clean(x) return np.log(x) def inverse(self, z): """ Inverse of log transform g(x) = exp(x) INPUTS: z -- linear predictors in GLM OUTPUTS: x x -- exp(z) """ return np.exp(z) def deriv(self, x): """ Derivative of log transform g(x) = 1/x INPUTS: x -- mean parameters OUTPUTS: z z -- derivative of log transform of x """ x = self.clean(x) return 1. / x log = Log() class CDFLink(Logit): """ Use the CDF of a scipy.stats distribution as a link function: g(x) = dbn.ppf(x) """ def __init__(self, dbn=scipy.stats.norm): self.dbn = dbn def __call__(self, p): """ CDF link g(p) = self.dbn.ppf(p) INPUTS: p -- mean parameters OUTPUTS: z z -- ppf (inverse CDF) transform of p """ p = self.clean(p) return self.dbn.ppf(p) def inverse(self, z): """ Inverse of CDF link g^(-1)(z) = self.dbn.cdf(z) INPUTS: z -- linear predictors in GLM OUTPUTS: p p -- inverse of CDF link of z """ return self.dbn.cdf(z) def deriv(self, p): """ Derivative of CDF link g(p) = 1/self.dbn.pdf(self.dbn.ppf(p)) INPUTS: p -- mean parameters OUTPUTS: z z -- derivative of CDF transform of p """ p = self.clean(p) return 1.
/ self.dbn.pdf(self(p)) probit = CDFLink() probit.__doc__ = """ The probit (standard normal CDF) transform as a link function: g(x) = scipy.stats.norm.ppf(x) """ cauchy = CDFLink(dbn=scipy.stats.cauchy) cauchy.__doc__ = """ The Cauchy (standard Cauchy CDF) transform as a link function: g(x) = scipy.stats.cauchy.ppf(x) """ class CLogLog(Logit): """ The complementary log-log transform as a link function: g(x) = log(-log(x)) """ def __call__(self, p): """ C-Log-Log transform g(p) = log(-log(p)) INPUTS: p -- mean parameters OUTPUTS: z z -- log(-log(p)) """ p = self.clean(p) return np.log(-np.log(p)) def inverse(self, z): """ Inverse of C-Log-Log transform g(z) = exp(-exp(z)) INPUTS: z -- linear predictor scale OUTPUTS: p p -- mean parameters """ return np.exp(-np.exp(z)) def deriv(self, p): """ Derivative of C-Log-Log transform g(p) = - 1 / (log(p) * p) INPUTS: p -- mean parameters OUTPUTS: z z -- - 1 / (log(p) * p) """ p = self.clean(p) return -1. / (np.log(p) * p) cloglog = CLogLog() nipy-0.6.1/nipy/algorithms/statistics/models/family/varfuncs.py000066400000000000000000000033061470056100100247600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __docformat__ = 'restructuredtext' import numpy as np class VarianceFunction: """ Variance function that relates the variance of a random variable to its mean. Defaults to 1. """ def __call__(self, mu): """ Default variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- ones(mu.shape) """ return np.ones(mu.shape, np.float64) constant = VarianceFunction() class Power: """ Power variance function: V(mu) = fabs(mu)**power INPUTS: power -- exponent used in power variance function """ def __init__(self, power=1.): self.power = power def __call__(self, mu): """ Power variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- fabs(mu)**self.power """ return np.power(np.fabs(mu), self.power) class Binomial: """ Binomial variance function p = mu / n; V(mu) = p * (1 - p) * n INPUTS: n -- number of trials in Binomial """ tol = 1.0e-10 def __init__(self, n=1): self.n = n def clean(self, p): return np.clip(p, Binomial.tol, 1 - Binomial.tol) def __call__(self, mu): """ Binomial variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- mu / self.n * (1 - mu / self.n) * self.n """ p = self.clean(mu / self.n) return p * (1 - p) * self.n mu = Power() mu_squared = Power(power=2) mu_cubed = Power(power=3) binary = Binomial() nipy-0.6.1/nipy/algorithms/statistics/models/glm.py000066400000000000000000000050351470056100100224300ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General linear models -------------------- """ import numpy as np from . import family from .regression import WLSModel class Model(WLSModel): niter = 10 def __init__(self, design, family=family.Gaussian()): self.family = family super().__init__(design, weights=1) def __iter__(self): self.iter = 0 self.dev = np.inf return self def deviance(self, Y=None, results=None, scale=1.): """ Return (unnormalized) log-likelihood for GLM. Note that self.scale is interpreted as a variance in old_model, so we divide the residuals by its sqrt.
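For the default Gaussian family with identity link this reduces to the residual sum of squares divided by `scale`.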
""" if results is None: results = self.results if Y is None: Y = self.Y return self.family.deviance(Y, results.mu) / scale def __next__(self): results = self.results Y = self.Y self.weights = self.family.weights(results.mu) self.initialize(self.design) Z = results.predicted + self.family.link.deriv(results.mu) *\ (Y - results.mu) newresults = super().fit(Z) newresults.Y = Y newresults.mu = self.family.link.inverse(newresults.predicted) self.iter += 1 return newresults def cont(self, tol=1.0e-05): """ Continue iterating, or has convergence been obtained? """ if self.iter >= Model.niter: return False curdev = self.deviance(results=self.results) if np.fabs((self.dev - curdev) / curdev) < tol: return False self.dev = curdev return True def estimate_scale(self, Y=None, results=None): """ Return Pearson\'s X^2 estimate of scale. """ if results is None: results = self.results if Y is None: Y = self.Y resid = Y - results.mu return ((np.power(resid, 2) / self.family.variance(results.mu)).sum() / results.df_resid) def fit(self, Y): self.Y = np.asarray(Y, np.float64) iter(self) self.results = super().fit( self.family.link.initialize(Y)) self.results.mu = self.family.link.inverse(self.results.predicted) self.scale = self.results.scale = self.estimate_scale() while self.cont(): self.results = next(self) self.scale = self.results.scale = self.estimate_scale() return self.results nipy-0.6.1/nipy/algorithms/statistics/models/info.py000066400000000000000000000014121470056100100225770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Statistical models - model `formula` - standard `regression` models - `OLSModel` (ordinary least square regression) - `WLSModel` (weighted least square regression) - `ARModel` (autoregressive model) - `glm.Model` (generalized linear models) - robust statistical models - `rlm.Model` (robust linear models using M estimators) - `robust.norms` estimates - `robust.scale` estimates (MAD, Huber's proposal 2). - `mixed` effects models - `gam` (generalized additive models) """ __docformat__ = 'restructuredtext en' depends = ['special.orthogonal', 'integrate', 'optimize', 'linalg'] postpone_import = True nipy-0.6.1/nipy/algorithms/statistics/models/model.py000066400000000000000000000342421470056100100227530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from functools import cached_property import numpy as np from numpy.linalg import inv from scipy.stats import t as t_distribution from ...utils.matrices import pos_recipr # Inverse t cumulative distribution inv_t_cdf = t_distribution.ppf class Model: """ A (predictive) statistical model. The class Model itself does nothing but lays out the methods expected of any subclass. """ def __init__(self): pass def initialize(self): """ Initialize (possibly re-initialize) a Model instance. For instance, the design matrix of a linear model may change and some things must be recomputed. """ raise NotImplementedError def fit(self): """ Fit a model to data. """ raise NotImplementedError def predict(self, design=None): """ After a model has been fit, results are (assumed to be) stored in self.results, which itself should have a predict method. """ # XXX method is from an earlier API and needs to be rethought self.results.predict(design) class LikelihoodModel(Model): def logL(self, theta, Y, nuisance=None): """ Log-likelihood of model. 
""" raise NotImplementedError def score(self, theta, Y, nuisance=None): """ Gradient of logL with respect to theta. This is the score function of the model """ raise NotImplementedError def information(self, theta, nuisance=None): """ Fisher information matrix The inverse of the expected value of ``- d^2 logL / dtheta^2.`` """ raise NotImplementedError class LikelihoodModelResults: ''' Class to contain results from likelihood models ''' # This is the class in which things like AIC, BIC, llf can be implemented as # methods, not computed in, say, the fit method of OLSModel def __init__(self, theta, Y, model, cov=None, dispersion=1., nuisance=None, rank=None): ''' Set up results structure Parameters ---------- theta : ndarray parameter estimates from estimated model Y : ndarray data model : ``LikelihoodModel`` instance model used to generate fit cov : None or ndarray, optional covariance of thetas dispersion : scalar, optional multiplicative factor in front of `cov` nuisance : None of ndarray parameter estimates needed to compute logL rank : None or scalar rank of the model. If rank is not None, it is used for df_model instead of the usual counting of parameters. Notes ----- The covariance of thetas is given by: dispersion * cov For (some subset of models) `dispersion` will typically be the mean square error from the estimated model (sigma^2) ''' self.theta = theta self.Y = Y self.model = model if cov is None: self.cov = self.model.information(self.theta, nuisance=self.nuisance) else: self.cov = cov self.dispersion = dispersion self.nuisance = nuisance self.df_total = Y.shape[0] self.df_model = model.df_model # put this as a parameter of LikelihoodModel self.df_resid = self.df_total - self.df_model @cached_property def logL(self): """ The maximized log-likelihood """ return self.model.logL(self.theta, self.Y, nuisance=self.nuisance) @cached_property def AIC(self): """ Akaike Information Criterion """ p = self.theta.shape[0] return -2 * self.logL + 2 * p @cached_property def BIC(self): """ Schwarz's Bayesian Information Criterion """ n = self.Y.shape[0] p = self.theta.shape[0] return - 2 * self.logL + np.log(n) * p def t(self, column=None): """ Return the (Wald) t-statistic for a given parameter estimate. Use Tcontrast for more complicated (Wald) t-statistics. """ if column is None: column = list(range(self.theta.shape[0])) column = np.asarray(column) _theta = self.theta[column] _cov = self.vcov(column=column) if _cov.ndim == 2: _cov = np.diag(_cov) _t = _theta * pos_recipr(np.sqrt(_cov)) return _t def vcov(self, matrix=None, column=None, dispersion=None, other=None): """ Variance/covariance matrix of linear contrast Parameters ---------- matrix: (dim, self.theta.shape[0]) array, optional numerical contrast specification, where ``dim`` refers to the 'dimension' of the contrast i.e. 1 for t contrasts, 1 or more for F contrasts. column: int, optional alternative way of specifying contrasts (column index) dispersion: float or (n_voxels,) array, optional value(s) for the dispersion parameters other: (dim, self.theta.shape[0]) array, optional alternative contrast specification (?) Returns ------- cov: (dim, dim) or (n_voxels, dim, dim) array the estimated covariance matrix/matrices Returns the variance/covariance matrix of a linear contrast of the estimates of theta, multiplied by `dispersion` which will often be an estimate of `dispersion`, like, sigma^2. The covariance of interest is either specified as a (set of) column(s) or a matrix. 
""" if self.cov is None: raise ValueError('need covariance of parameters for computing ' '(unnormalized) covariances') if dispersion is None: dispersion = self.dispersion if column is not None: column = np.asarray(column) if column.shape == (): return self.cov[column, column] * dispersion else: return self.cov[column][:, column] * dispersion elif matrix is not None: if other is None: other = matrix tmp = np.dot(matrix, np.dot(self.cov, np.transpose(other))) if np.isscalar(dispersion): return tmp * dispersion else: return tmp[:, :, np.newaxis] * dispersion if matrix is None and column is None: return self.cov * dispersion def Tcontrast(self, matrix, store=('t', 'effect', 'sd'), dispersion=None): """ Compute a Tcontrast for a row vector `matrix` To get the t-statistic for a single column, use the 't' method. Parameters ---------- matrix : 1D array-like contrast matrix store : sequence, optional components of t to store in results output object. Defaults to all components ('t', 'effect', 'sd'). dispersion : None or float, optional Returns ------- res : ``TContrastResults`` object """ matrix = np.asarray(matrix) # 1D vectors assumed to be row vector if matrix.ndim == 1: matrix = matrix[None] if matrix.shape[0] != 1: raise ValueError("t contrasts should have only one row") if matrix.shape[1] != self.theta.shape[0]: raise ValueError("t contrasts should be length P=%d, " "but this is length %d" % (self.theta.shape[0], matrix.shape[1])) store = set(store) if not store.issubset(('t', 'effect', 'sd')): raise ValueError(f'Unexpected store request in {store}') st_t = st_effect = st_sd = effect = sd = None if 't' in store or 'effect' in store: effect = np.dot(matrix, self.theta) if 'effect' in store: st_effect = np.squeeze(effect) if 't' in store or 'sd' in store: sd = np.sqrt(self.vcov(matrix=matrix, dispersion=dispersion)) if 'sd' in store: st_sd = np.squeeze(sd) if 't' in store: st_t = np.squeeze(effect * pos_recipr(sd)) return TContrastResults(effect=st_effect, t=st_t, sd=st_sd, df_den=self.df_resid) def Fcontrast(self, matrix, dispersion=None, invcov=None): """ Compute an Fcontrast for a contrast matrix `matrix`. Here, `matrix` M is assumed to be non-singular. More precisely .. math:: M pX pX' M' is assumed invertible. Here, :math:`pX` is the generalized inverse of the design matrix of the model. There can be problems in non-OLS models where the rank of the covariance of the noise is not full. See the contrast module to see how to specify contrasts. In particular, the matrices from these contrasts will always be non-singular in the sense above. Parameters ---------- matrix : 1D array-like contrast matrix dispersion : None or float, optional If None, use ``self.dispersion`` invcov : None or array, optional Known inverse of variance covariance matrix. If None, calculate this matrix. 
Returns ------- f_res : ``FContrastResults`` instance with attributes F, df_den, df_num Notes ----- For F contrasts, we now specify an effect and covariance """ matrix = np.asarray(matrix) # 1D vectors assumed to be row vector if matrix.ndim == 1: matrix = matrix[None] if matrix.shape[1] != self.theta.shape[0]: raise ValueError("F contrasts should have shape[1] P=%d, " "but this has shape[1] %d" % (self.theta.shape[0], matrix.shape[1])) ctheta = np.dot(matrix, self.theta) if matrix.ndim == 1: matrix = matrix.reshape((1, matrix.shape[0])) if dispersion is None: dispersion = self.dispersion q = matrix.shape[0] if invcov is None: invcov = inv(self.vcov(matrix=matrix, dispersion=1.0)) F = np.add.reduce(np.dot(invcov, ctheta) * ctheta, 0) *\ pos_recipr(q * dispersion) F = np.squeeze(F) return FContrastResults( effect=ctheta, covariance=self.vcov( matrix=matrix, dispersion=dispersion[np.newaxis]), F=F, df_den=self.df_resid, df_num=invcov.shape[0]) def conf_int(self, alpha=.05, cols=None, dispersion=None): ''' The confidence interval of the specified theta estimates. Parameters ---------- alpha : float, optional The `alpha` level for the confidence interval. ie., `alpha` = .05 returns a 95% confidence interval. cols : tuple, optional `cols` specifies which confidence intervals to return dispersion : None or scalar scale factor for the variance / covariance (see class docstring and ``vcov`` method docstring) Returns ------- cis : ndarray `cis` is shape ``(len(cols), 2)`` where each row contains [lower, upper] for the given entry in `cols` Examples -------- >>> from numpy.random import standard_normal as stan >>> from nipy.algorithms.statistics.models.regression import OLSModel >>> x = np.hstack((stan((30,1)),stan((30,1)),stan((30,1)))) >>> beta=np.array([3.25, 1.5, 7.0]) >>> y = np.dot(x,beta) + stan((30)) >>> model = OLSModel(x).fit(y) >>> confidence_intervals = model.conf_int(cols=(1,2)) Notes ----- Confidence intervals are two-tailed. TODO: tails : string, optional `tails` can be "two", "upper", or "lower" ''' if cols is None: lower = self.theta - inv_t_cdf(1 - alpha / 2, self.df_resid) *\ np.sqrt(np.diag(self.vcov(dispersion=dispersion))) upper = self.theta + inv_t_cdf(1 - alpha / 2, self.df_resid) *\ np.sqrt(np.diag(self.vcov(dispersion=dispersion))) else: lower, upper = [], [] for i in cols: lower.append( self.theta[i] - inv_t_cdf(1 - alpha / 2, self.df_resid) * np.sqrt(self.vcov(column=i, dispersion=dispersion))) upper.append( self.theta[i] + inv_t_cdf(1 - alpha / 2, self.df_resid) * np.sqrt(self.vcov(column=i, dispersion=dispersion))) return np.asarray(list(zip(lower, upper))) class TContrastResults: """ Results from a t contrast of coefficients in a parametric model. The class does nothing, it is a container for the results from T contrasts, and returns the T-statistics when np.asarray is called. """ def __init__(self, t, sd, effect, df_den=None): if df_den is None: df_den = np.inf self.t = t self.sd = sd self.effect = effect self.df_den = df_den def __array__(self): return np.asarray(self.t) def __str__(self): return ('<T contrast: effect=%s, sd=%s, t=%s, df_den=%d>' % (self.effect, self.sd, self.t, self.df_den)) class FContrastResults: """ Results from an F contrast of coefficients in a parametric model. The class does nothing, it is a container for the results from F contrasts, and returns the F-statistics when np.asarray is called.
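``np.asarray`` on the object therefore yields the F statistic itself, so the object can be used wherever a plain array of F values is expected.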
""" def __init__(self, effect, covariance, F, df_num, df_den=None): if df_den is None: df_den = np.inf self.effect = effect self.covariance = covariance self.F = F self.df_den = df_den self.df_num = df_num def __array__(self): return np.asarray(self.F) def __str__(self): return '' % \ (repr(self.F), self.df_den, self.df_num) nipy-0.6.1/nipy/algorithms/statistics/models/nlsmodel.py000066400000000000000000000077611470056100100234760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Non-linear least squares model """ __docformat__ = 'restructuredtext' import numpy as np import numpy.linalg as npl from .model import Model class NLSModel(Model): """ Class representing a simple nonlinear least squares model. """ def __init__(self, Y, design, f, grad, theta, niter=10): """ Initialize non-linear model instance Parameters ---------- Y : ndarray the data in the NLS model design : ndarray the design matrix, X f : callable the map between the (linear parameters (in the design matrix) and the nonlinear parameters (theta)) and the predicted data. `f` accepts the design matrix and the parameters (theta) as input, and returns the predicted data at that design. grad : callable the gradient of f, this should be a function of an nxp design matrix X and qx1 vector theta that returns an nxq matrix df_i/dtheta_j where: .. math:: f_i(theta) = f(X[i], theta) is the nonlinear response function for the i-th instance in the model. theta : array parameters niter : int number of iterations """ Model.__init__(self) self.Y = Y self.design = design self.f = f self.grad = grad self.theta = theta self.niter = niter if self.design is not None and self.Y is not None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def _Y_changed(self): if self.design is not None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def _design_changed(self): if self.Y is not None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def getZ(self): """ Set Z into `self` Returns ------- None """ self._Z = self.grad(self.design, self.theta) def getomega(self): """ Set omega into `self` Returns ------- None """ self._omega = self.predict() - np.dot(self._Z, self.theta) def predict(self, design=None): """ Get predicted values for `design` or ``self.design`` Parameters ---------- design : None or array, optional design at which to predict data. If None (the default) then use the initial ``self.design`` Returns ------- y_predicted : array predicted data at given (or initial) design """ if design is None: design = self.design return self.f(design, self.theta) def SSE(self): """ Sum of squares error. 
nipy-0.6.1/nipy/algorithms/statistics/models/nlsmodel.py000066400000000000000000000077611470056100100234740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Non-linear least squares model """
__docformat__ = 'restructuredtext'

import numpy as np
import numpy.linalg as npl

from .model import Model


class NLSModel(Model):
    """ Class representing a simple nonlinear least squares model. """

    def __init__(self, Y, design, f, grad, theta, niter=10):
        """ Initialize non-linear model instance

        Parameters
        ----------
        Y : ndarray
            the data in the NLS model
        design : ndarray
            the design matrix, X
        f : callable
            the map from the linear parameters (in the design matrix) and the
            nonlinear parameters (theta) to the predicted data. `f` accepts
            the design matrix and the parameters (theta) as input, and
            returns the predicted data at that design.
        grad : callable
            the gradient of f, this should be a function of an nxp design
            matrix X and qx1 vector theta that returns an nxq matrix
            df_i/dtheta_j where:

            .. math::

                f_i(theta) = f(X[i], theta)

            is the nonlinear response function for the i-th instance in
            the model.
        theta : array
            parameters
        niter : int
            number of iterations
        """
        Model.__init__(self)
        self.Y = Y
        self.design = design
        self.f = f
        self.grad = grad
        self.theta = theta
        self.niter = niter
        if self.design is not None and self.Y is not None:
            if self.Y.shape[0] != self.design.shape[0]:
                raise ValueError('Y should be same shape as design')

    def _Y_changed(self):
        if self.design is not None:
            if self.Y.shape[0] != self.design.shape[0]:
                raise ValueError('Y should be same shape as design')

    def _design_changed(self):
        if self.Y is not None:
            if self.Y.shape[0] != self.design.shape[0]:
                raise ValueError('Y should be same shape as design')

    def getZ(self):
        """ Set Z into `self`

        Returns
        -------
        None
        """
        self._Z = self.grad(self.design, self.theta)

    def getomega(self):
        """ Set omega into `self`

        Returns
        -------
        None
        """
        self._omega = self.predict() - np.dot(self._Z, self.theta)

    def predict(self, design=None):
        """ Get predicted values for `design` or ``self.design``

        Parameters
        ----------
        design : None or array, optional
            design at which to predict data.  If None (the default) then
            use the initial ``self.design``

        Returns
        -------
        y_predicted : array
            predicted data at given (or initial) design
        """
        if design is None:
            design = self.design
        return self.f(design, self.theta)

    def SSE(self):
        """ Sum of squares error.

        Returns
        -------
        sse: float
            sum of squared residuals
        """
        return sum((self.Y - self.predict()) ** 2)

    def __iter__(self):
        """ Get iterator from model instance

        Returns
        -------
        itor : iterator
            Returns ``self``
        """
        if self.theta is not None:
            self.initial = self.theta
        elif self.initial is not None:
            self.theta = self.initial
        else:
            raise ValueError('need an initial estimate for theta')
        self._iter = 0
        self.theta = self.initial
        return self

    def __next__(self):
        """ Do an iteration of fit

        Returns
        -------
        None
        """
        if self._iter < self.niter:
            self.getZ()
            self.getomega()
            Zpinv = npl.pinv(self._Z)
            self.theta = np.dot(Zpinv, self.Y - self._omega)
        else:
            raise StopIteration
        self._iter += 1
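# --- Editor's note: a minimal usage sketch (not part of nipy) ---
# Fitting y = a * exp(b * x) with the Gauss-Newton iteration protocol of
# NLSModel above.  The ``f`` and ``grad`` callables are illustrative
# choices for this particular curve, not part of the library:
#
#   import numpy as np
#   from nipy.algorithms.statistics.models.nlsmodel import NLSModel
#
#   rng = np.random.RandomState(0)
#   x = np.linspace(0., 1., 40)
#   X = x[:, None]                      # design with a single column
#   y = 2.0 * np.exp(1.5 * x) + 0.05 * rng.standard_normal(40)
#
#   def f(design, theta):
#       a, b = theta
#       return a * np.exp(b * design[:, 0])
#
#   def grad(design, theta):            # columns are df/da and df/db
#       a, b = theta
#       e = np.exp(b * design[:, 0])
#       return np.column_stack([e, a * design[:, 0] * e])
#
#   model = NLSModel(Y=y, design=X, f=f, grad=grad,
#                    theta=np.array([1.0, 1.0]), niter=10)
#   for _ in model:                     # each step is a Gauss-Newton update
#       pass
#   print(model.theta)                  # should approach [2.0, 1.5]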
nipy-0.6.1/nipy/algorithms/statistics/models/regression.py000066400000000000000000000701661470056100100240360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module implements some standard regression models: OLS and WLS
models, as well as an AR(p) regression model.

Models are specified with a design matrix and are fit using their
'fit' method.

Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.

General reference for regression models:

'Introduction to Linear Regression Analysis', Douglas C. Montgomery,
    Elizabeth A. Peck, G. Geoffrey Vining. Wiley, 2006.
"""

__docformat__ = 'restructuredtext en'

import warnings
from functools import cached_property

import numpy as np
import numpy.linalg as npl
import scipy.linalg as spl
from scipy import stats

from nipy.algorithms.utils.matrices import matrix_rank, pos_recipr

# Legacy repr printing from numpy.
from .model import LikelihoodModel, LikelihoodModelResults


class OLSModel(LikelihoodModel):
    """ A simple ordinary least squares model.

    Parameters
    ----------
    design : array-like
        This is your design matrix.  Data are assumed to be column ordered
        with observations in rows.

    Methods
    -------
    model.__init___(design)
    model.logL(b=self.beta, Y)

    Attributes
    ----------
    design : ndarray
        This is the design, or X, matrix.
    wdesign : ndarray
        This is the whitened design matrix.  `design` == `wdesign` by
        default for the OLSModel, though models that inherit from the
        OLSModel will whiten the design.
    calc_beta : ndarray
        This is the Moore-Penrose pseudoinverse of the whitened design
        matrix.
    normalized_cov_beta : ndarray
        ``np.dot(calc_beta, calc_beta.T)``
    df_resid : scalar
        Degrees of freedom of the residuals.  Number of observations less
        the rank of the design.
    df_model : scalar
        Degrees of freedom of the model.  The rank of the design.

    Examples
    --------
    >>> from nipy.algorithms.statistics.api import Term, Formula
    >>> data = np.rec.fromarrays(([1,3,4,5,2,3,4], range(1,8)),
    ...                          names=('Y', 'X'))
    >>> f = Formula([Term("X"), 1])
    >>> dmtx = f.design(data, return_float=True)
    >>> model = OLSModel(dmtx)
    >>> results = model.fit(data['Y'])
    >>> results.theta
    array([ 0.25      ,  2.14285714])
    >>> results.t()
    array([ 0.98019606,  1.87867287])
    >>> print(results.Tcontrast([0,1]))  #doctest: +FLOAT_CMP
    <T contrast: effect=2.14285714, sd=1.14062282, t=1.87867287, df_den=5>
    >>> print(results.Fcontrast(np.eye(2)))  #doctest: +FLOAT_CMP
    <F contrast: F=19.46078431, df_den=5, df_num=2>
    """

    def __init__(self, design):
        """
        Parameters
        ----------
        design : array-like
            This is your design matrix.  Data are assumed to be column
            ordered with observations in rows.
        """
        super().__init__()
        self.initialize(design)

    def initialize(self, design):
        # PLEASE don't assume we have a constant...
        # TODO: handle case for noconstant regression
        self.design = design
        self.wdesign = self.whiten(self.design)
        self.calc_beta = npl.pinv(self.wdesign)
        self.normalized_cov_beta = np.dot(self.calc_beta,
                                          np.transpose(self.calc_beta))
        self.df_total = self.wdesign.shape[0]
        self.df_model = matrix_rank(self.design)
        self.df_resid = self.df_total - self.df_model

    def logL(self, beta, Y, nuisance=None):
        r''' Returns the value of the loglikelihood function at beta.

        Given the whitened design matrix, the loglikelihood is evaluated
        at the parameter vector, beta, for the dependent variable, Y
        and the nuisance parameter, sigma.

        Parameters
        ----------
        beta : ndarray
            The parameter estimates.  Must be of length df_model.
        Y : ndarray
            The dependent variable
        nuisance : dict, optional
            A dict with key 'sigma', which is an optional estimate of sigma.
            If None, defaults to its maximum likelihood estimate (with beta
            fixed) as ``sum((Y - X*beta)**2) / n``, where n=Y.shape[0],
            X=self.design.

        Returns
        -------
        loglf : float
            The value of the loglikelihood function.

        Notes
        -----
        The log-Likelihood Function is defined as

        .. math::

            \ell(\beta,\sigma,Y)=
            -\frac{n}{2}\log(2\pi\sigma^2) - \|Y-X\beta\|^2/(2\sigma^2)

        The parameter :math:`\sigma` above is what is sometimes referred to
        as a nuisance parameter. That is, the likelihood is considered as a
        function of :math:`\beta`, but to evaluate it, a value of
        :math:`\sigma` is needed.

        If :math:`\sigma` is not provided, then its maximum likelihood
        estimate:

        .. math::

            \hat{\sigma}(\beta) = \frac{\text{SSE}(\beta)}{n}

        is plugged in. This likelihood is now a function of only
        :math:`\beta` and is technically referred to as a
        profile-likelihood.

        References
        ----------
        .. [1] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
        '''
        # This is overwriting an abstract method of LikelihoodModel
        X = self.wdesign
        wY = self.whiten(Y)
        r = wY - np.dot(X, beta)
        n = self.df_total
        SSE = (r ** 2).sum(0)
        if nuisance is None:
            sigmasq = SSE / n
        else:
            sigmasq = nuisance['sigma']
        loglf = - n / 2. * np.log(2 * np.pi * sigmasq) - SSE / (2 * sigmasq)
        return loglf

    def score(self, beta, Y, nuisance=None):
        ''' Gradient of the loglikelihood function at (beta, Y, nuisance).

        The gradient of the loglikelihood function at (beta, Y, nuisance) is
        the score function.  See :meth:`logL` for details.

        Parameters
        ----------
        beta : ndarray
            The parameter estimates.  Must be of length df_model.
        Y : ndarray
            The dependent variable.
        nuisance : dict, optional
            A dict with key 'sigma', which is an optional estimate of sigma.
            If None, defaults to its maximum likelihood estimate (with beta
            fixed) as ``sum((Y - X*beta)**2) / n``, where n=Y.shape[0],
            X=self.design.

        Returns
        -------
        The gradient of the loglikelihood function.
        '''
        # This is overwriting an abstract method of LikelihoodModel
        X = self.wdesign
        wY = self.whiten(Y)
        r = wY - np.dot(X, beta)
        n = self.df_total
        if nuisance is None:
            SSE = (r ** 2).sum(0)
            sigmasq = SSE / n
        else:
            sigmasq = nuisance['sigma']
        # Gradient of the (profile) loglikelihood with respect to beta is
        # X' r / sigma^2; X must be transposed for the shapes to match.
        return np.dot(X.T, r) / sigmasq
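    # --- Editor's note: a cross-check sketch (not part of nipy) ---
    # With the nuisance variance held fixed, ``score`` should match a
    # finite-difference gradient of ``logL`` with respect to beta:
    #
    #   import numpy as np
    #   rng = np.random.RandomState(0)
    #   X = np.column_stack([rng.standard_normal(20), np.ones(20)])
    #   Y = rng.standard_normal(20)
    #   model = OLSModel(X)
    #   beta, nuis, eps = np.array([0.5, 1.0]), {'sigma': 1.0}, 1e-6
    #   fd = np.array([(model.logL(beta + eps * e, Y, nuisance=nuis) -
    #                   model.logL(beta - eps * e, Y, nuisance=nuis)) /
    #                  (2 * eps) for e in np.eye(2)])
    #   assert np.allclose(model.score(beta, Y, nuisance=nuis), fd)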
    def information(self, beta, nuisance=None):
        ''' Returns the information matrix at (beta, Y, nuisance).

        See logL for details.

        Parameters
        ----------
        beta : ndarray
            The parameter estimates.  Must be of length df_model.
        nuisance : dict
            A dict with key 'sigma', which is an estimate of sigma.  If
            None, defaults to its maximum likelihood estimate (with beta
            fixed) as ``sum((Y - X*beta)**2) / n`` where n=Y.shape[0],
            X=self.design.

        Returns
        -------
        info : array
            The information matrix, the negative of the Hessian of the
            log-likelihood function evaluated at (theta, Y, nuisance).
        '''
        # This is overwriting an abstract method of LikelihoodModel
        # The subclasses WLSModel, ARModel and GLSModel all overwrite this
        # method.  The point of these subclasses is such that not much of
        # OLSModel has to be changed.
        X = self.design
        sigmasq = nuisance['sigma']
        C = sigmasq * np.dot(X.T, X)
        return C

    def whiten(self, X):
        """ Whiten design matrix

        Parameters
        ----------
        X : array
            design matrix

        Returns
        -------
        wX : array
            This matrix is the matrix whose pseudoinverse is ultimately
            used in estimating the coefficients.  For OLSModel, it does
            nothing.  For WLSModel and ARModel, it pre-applies a square
            root of the inverse covariance matrix to X.
        """
        return X

    @cached_property
    def has_intercept(self):
        """ Check if column of 1s is in column space of design """
        o = np.ones(self.design.shape[0])
        obeta = np.dot(self.calc_beta, o)
        ohat = np.dot(self.wdesign, obeta)
        return np.allclose(ohat, o)

    @cached_property
    def rank(self):
        """ Compute rank of design matrix """
        return matrix_rank(self.wdesign)

    def fit(self, Y):
        """ Fit model to data `Y`

        Full fit of the model including estimate of covariance matrix,
        (whitened) residuals and scale.

        Parameters
        ----------
        Y : array-like
            The dependent variable for the Least Squares problem.

        Returns
        -------
        fit : RegressionResults
        """
        # Other estimates of the covariance matrix for a heteroscedastic
        # regression model can be implemented in WLSmodel. (Weighted least
        # squares models assume covariance is diagonal, i.e.
        # heteroscedastic).
        wY = self.whiten(Y)
        beta = np.dot(self.calc_beta, wY)
        wresid = wY - np.dot(self.wdesign, beta)
        dispersion = np.sum(wresid ** 2, 0) / (self.wdesign.shape[0] -
                                               self.wdesign.shape[1])
        lfit = RegressionResults(beta, Y, self,
                                 wY, wresid, dispersion=dispersion,
                                 cov=self.normalized_cov_beta)
        return lfit
class ARModel(OLSModel):
    """ A regression model with an AR(p) covariance structure.

    In terms of a LikelihoodModel, the parameters are beta, the usual
    regression parameters, and sigma, a scalar nuisance parameter that
    shows up as multiplier in front of the AR(p) covariance.

    The linear autoregressive process of order p--AR(p)--is defined as:

        TODO

    Examples
    --------
    >>> from nipy.algorithms.statistics.api import Term, Formula
    >>> data = np.rec.fromarrays(([1,3,4,5,8,10,9], range(1,8)),
    ...                          names=('Y', 'X'))
    >>> f = Formula([Term("X"), 1])
    >>> dmtx = f.design(data, return_float=True)
    >>> model = ARModel(dmtx, 2)

    We go through the ``model.iterative_fit`` procedure long-hand:

    >>> for i in range(6):
    ...     results = model.fit(data['Y'])
    ...     print("AR coefficients:", model.rho)
    ...     rho, sigma = yule_walker(data["Y"] - results.predicted,
    ...                              order=2,
    ...                              df=model.df_resid)
    ...     model = ARModel(model.design, rho)  #doctest: +FLOAT_CMP
    ...
    AR coefficients: [ 0.  0.]
    AR coefficients: [-0.61530877 -1.01542645]
    AR coefficients: [-0.72660832 -1.06201457]
    AR coefficients: [-0.7220361  -1.05365352]
    AR coefficients: [-0.72229201 -1.05408193]
    AR coefficients: [-0.722278   -1.05405838]
    >>> results.theta  #doctest: +FLOAT_CMP
    array([ 1.59564228, -0.58562172])
    >>> results.t()  #doctest: +FLOAT_CMP
    array([ 38.0890515 ,  -3.45429252])
    >>> print(results.Tcontrast([0,1]))  #doctest: +FLOAT_CMP
    <T contrast: effect=-0.58562172, sd=0.16953449, t=-3.45429252, df_den=5>
    >>> print(results.Fcontrast(np.identity(2)))  #doctest: +FLOAT_CMP
    <F contrast: F=4216.81, df_den=5, df_num=2>

    Reinitialize the model, and do the automated iterative fit

    >>> model.rho = np.array([0,0])
    >>> model.iterative_fit(data['Y'], niter=3)
    >>> print(model.rho)  #doctest: +FLOAT_CMP
    [-0.7220361  -1.05365352]
    """

    def __init__(self, design, rho):
        """ Initialize AR model instance

        Parameters
        ----------
        design : ndarray
            2D array with design matrix
        rho : int or array-like
            If int, gives order of model, and initializes rho to zeros.  If
            ndarray, gives initial estimate of rho.  Be careful as
            ``ARModel(X, 1) != ARModel(X, 1.0)``.
        """
        if type(rho) is int:
            self.order = rho
            self.rho = np.zeros(self.order, np.float64)
        else:
            self.rho = np.squeeze(np.asarray(rho))
            if len(self.rho.shape) not in [0, 1]:
                raise ValueError("AR parameters must be a scalar or a vector")
            if self.rho.shape == ():
                self.rho.shape = (1,)
            self.order = self.rho.shape[0]
        super().__init__(design)

    def iterative_fit(self, Y, niter=3):
        """ Perform an iterative two-stage procedure to estimate AR(p)
        parameters and regression coefficients simultaneously.

        Parameters
        ----------
        Y : ndarray
            data to which to fit model
        niter : optional, int
            the number of iterations (default 3)

        Returns
        -------
        None
        """
        for i in range(niter):
            self.initialize(self.design)
            results = self.fit(Y)
            self.rho, _ = yule_walker(Y - results.predicted,
                                      order=self.order, df=self.df_resid)

    def whiten(self, X):
        """ Whiten a series of columns according to AR(p) covariance
        structure

        Parameters
        ----------
        X : array-like of shape (n_features)
            array to whiten

        Returns
        -------
        wX : ndarray
            X whitened with order self.order AR
        """
        X = np.asarray(X, np.float64)
        _X = X.copy()
        for i in range(self.order):
            _X[(i + 1):] = _X[(i + 1):] - self.rho[i] * X[0: - (i + 1)]
        return _X


def yule_walker(X, order=1, method="unbiased", df=None, inv=False):
    """ Estimate AR(p) parameters from a sequence X using the Yule-Walker
    equations.

    Unbiased or maximum-likelihood estimator (mle)

    See, for example:

    http://en.wikipedia.org/wiki/Autoregressive_moving_average_model

    Parameters
    ----------
    X : ndarray of shape(n)
    order : int, optional
        Order of AR process.
    method : str, optional
        Method can be "unbiased" or "mle" and this determines denominator
        in estimate of autocorrelation function (ACF) at lag k.  If "mle",
        the denominator is n=X.shape[0], if "unbiased" the denominator is
        n-k.
    df : int, optional
        Specifies the degrees of freedom.  If df is supplied, then it is
        assumed the X has df degrees of freedom rather than n.
    inv : bool, optional
        Whether to return the inverse of the R matrix (see code)

    Returns
    -------
    rho : (`order`,) ndarray
    sigma : float
        standard deviation of the residuals after fit
    R_inv : ndarray
        If `inv` is True, also return the inverse of the R matrix

    Notes
    -----
    See also
    http://en.wikipedia.org/wiki/AR_model#Calculation_of_the_AR_parameters
    """
    method = str(method).lower()
    if method not in ["unbiased", "mle"]:
        raise ValueError("ACF estimation method must be 'unbiased' or 'mle'")
    X = np.asarray(X, np.float64)
    if X.ndim != 1:
        raise ValueError("Expecting a vector to estimate AR parameters")
    X -= X.mean(0)
    n = df or X.shape[0]
    if method == "unbiased":
        den = lambda k: n - k
    else:
        den = lambda k: n
    r = np.zeros(order + 1, np.float64)
    r[0] = (X ** 2).sum() / den(0)
    for k in range(1, order + 1):
        r[k] = (X[0:-k] * X[k:]).sum() / den(k)
    R = spl.toeplitz(r[:-1])
    rho = spl.solve(R, r[1:])
    sigmasq = r[0] - (r[1:] * rho).sum()
    if inv:
        return rho, np.sqrt(sigmasq), spl.inv(R)
    return rho, np.sqrt(sigmasq)
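# --- Editor's note: a minimal usage sketch (not part of nipy) ---
# Recovering the coefficients of a simulated AR(2) series with
# ``yule_walker``; for a long series the estimates should be close to the
# generating values [0.6, -0.3]:
#
#   import numpy as np
#   from nipy.algorithms.statistics.models.regression import yule_walker
#
#   rng = np.random.RandomState(7)
#   z = np.zeros(2000)
#   for t in range(2, 2000):   # z_t = 0.6 z_{t-1} - 0.3 z_{t-2} + noise
#       z[t] = 0.6 * z[t - 1] - 0.3 * z[t - 2] + rng.standard_normal()
#   rho, sigma = yule_walker(z, order=2, method='mle')
#   print(rho, sigma)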
def ar_bias_corrector(design, calc_beta, order=1):
    """ Return bias correcting matrix for `design` and AR order `order`

    There is a slight bias in the rho estimates on residuals due to the
    correlations induced in the residuals by fitting a linear model.  See
    [Worsley2002]_.

    This routine implements the bias correction described in appendix A.1
    of [Worsley2002]_.

    Parameters
    ----------
    design : array
        Design matrix
    calc_beta : array
        Moore-Penrose pseudoinverse of the (maybe) whitened design matrix.
        This is the matrix that, when applied to the (maybe whitened) data,
        produces the betas.
    order : int, optional
        Order p of AR(p) process

    Returns
    -------
    invM : array
        Matrix to bias correct estimated covariance matrix in calculating
        the AR coefficients

    References
    ----------
    .. [Worsley2002] K.J. Worsley, C.H. Liao, J. Aston, V. Petre, G.H.
       Duncan, F. Morales, A.C. Evans (2002) A General Statistical Analysis
       for fMRI Data. Neuroimage 15:1:15
    """
    R = np.eye(design.shape[0]) - np.dot(design, calc_beta)
    M = np.zeros((order + 1,) * 2)
    I = np.eye(R.shape[0])
    for i in range(order + 1):
        Di = np.dot(R, spl.toeplitz(I[i]))
        for j in range(order + 1):
            Dj = np.dot(R, spl.toeplitz(I[j]))
            M[i, j] = np.diag((np.dot(Di, Dj)) / (1. + (i > 0))).sum()
    return spl.inv(M)


def ar_bias_correct(results, order, invM=None):
    """ Apply bias correction in calculating AR(p) coefficients from
    `results`

    There is a slight bias in the rho estimates on residuals due to the
    correlations induced in the residuals by fitting a linear model.  See
    [Worsley2002]_.

    This routine implements the bias correction described in appendix A.1
    of [Worsley2002]_.

    Parameters
    ----------
    results : ndarray or results object
        If ndarray, assume these are residuals, from a simple model.  If a
        results object, with attribute ``resid``, then use these for the
        residuals.  See Notes for more detail
    order : int
        Order ``p`` of AR(p) model
    invM : None or array
        Known bias correcting matrix for covariance.  If None, calculate
        from ``results.model``

    Returns
    -------
    rho : array
        Bias-corrected AR(p) coefficients

    Notes
    -----
    If `results` has attributes ``resid`` and ``scale``, then assume
    ``scale`` has come from a fit of a potentially customized model, and we
    use that for the sum of squared residuals.  In this case we also need
    ``results.df_resid``.  Otherwise we assume this is a simple Gaussian
    model, like OLS, and take the simple sum of squares of the residuals.

    References
    ----------
    .. [Worsley2002] K.J. Worsley, C.H. Liao, J. Aston, V. Petre, G.H.
       Duncan, F. Morales, A.C. Evans (2002) A General Statistical Analysis
       for fMRI Data. Neuroimage 15:1:15
    """
    if invM is None:
        # We need a model from ``results`` if invM is not specified
        model = results.model
        invM = ar_bias_corrector(model.design, model.calc_beta, order)
    if hasattr(results, 'resid'):
        resid = results.resid
    else:
        resid = results
    in_shape = resid.shape
    n_features = in_shape[0]
    # Allows results residuals to have shapes other than 2D.  This allows
    # us to use this routine for image data as well as more standard 2D
    # model data
    resid = resid.reshape((n_features, -1))
    # glm.Model fit methods fill in a ``scale`` estimate.  For simpler
    # models, there is no scale estimate written into the results.
    # However, the same calculation resolves (with Gaussian family)
    # to ``np.sum(resid**2) / results.df_resid``.
    # See ``estimate_scale`` from glm.Model
    if hasattr(results, 'scale'):
        sum_sq = results.scale.reshape(resid.shape[1:]) * results.df_resid
    else:  # No scale in results
        sum_sq = np.sum(resid ** 2, axis=0)
    cov = np.zeros((order + 1,) + sum_sq.shape)
    cov[0] = sum_sq
    for i in range(1, order + 1):
        cov[i] = np.sum(resid[i:] * resid[0:-i], axis=0)
    # cov is shape (order + 1, V) where V = np.prod(in_shape[1:])
    cov = np.dot(invM, cov)
    output = cov[1:] * pos_recipr(cov[0])
    return np.squeeze(output.reshape((order,) + in_shape[1:]))
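# --- Editor's note: a minimal usage sketch (not part of nipy) ---
# Bias-corrected AR(1) coefficient from the residuals of an OLS fit; when
# ``invM`` is not given it is computed from ``results.model`` as above:
#
#   import numpy as np
#   from nipy.algorithms.statistics.models.regression import (
#       OLSModel, ar_bias_correct)
#
#   rng = np.random.RandomState(3)
#   X = np.column_stack([np.linspace(0, 1, 50), np.ones(50)])
#   Y = np.dot(X, [1.0, 2.0]) + rng.standard_normal(50)
#   results = OLSModel(X).fit(Y)
#   rho = ar_bias_correct(results, 1)   # bias-corrected lag-1 coefficient
#   print(rho)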
class AREstimator:
    """ A class to estimate AR(p) coefficients from residuals """

    def __init__(self, model, p=1):
        """ Bias-correcting AR estimation class

        Parameters
        ----------
        model : ``OLSModel`` instance
            A models.regression.OLSModel instance, where `model` has
            attribute ``design``
        p : int, optional
            Order of AR(p) noise
        """
        self.p = p
        self.invM = ar_bias_corrector(model.design, model.calc_beta, p)

    def __call__(self, results):
        """ Calculate AR(p) coefficients from `results`.``residuals``

        Parameters
        ----------
        results : Results instance
            A models.model.LikelihoodModelResults instance

        Returns
        -------
        ar_p : array
            AR(p) coefficients
        """
        return ar_bias_correct(results, self.p, self.invM)


class WLSModel(OLSModel):
    """ A regression model with diagonal but non-identity covariance
    structure.

    The weights are presumed to be (proportional to the) inverse of the
    variance of the observations.

    Examples
    --------
    >>> from nipy.algorithms.statistics.api import Term, Formula
    >>> data = np.rec.fromarrays(([1,3,4,5,2,3,4], range(1,8)),
    ...                          names=('Y', 'X'))
    >>> f = Formula([Term("X"), 1])
    >>> dmtx = f.design(data, return_float=True)
    >>> model = WLSModel(dmtx, weights=range(1,8))
    >>> results = model.fit(data['Y'])
    >>> results.theta
    array([ 0.0952381 ,  2.91666667])
    >>> results.t()
    array([ 0.35684428,  2.0652652 ])
    >>> print(results.Tcontrast([0,1]))  #doctest: +FLOAT_CMP
    <T contrast: effect=2.91666667, sd=1.41224801, t=2.0652652, df_den=5>
    >>> print(results.Fcontrast(np.identity(2)))  #doctest: +FLOAT_CMP
    <F contrast: F=26.99860724, df_den=5, df_num=2>
    """

    def __init__(self, design, weights=1):
        weights = np.array(weights)
        if weights.shape == ():  # scalar
            self.weights = weights
        else:
            design_rows = design.shape[0]
            if not (weights.shape[0] == design_rows and
                    weights.size == design_rows):
                raise ValueError(
                    'Weights must be scalar or same length as design')
            self.weights = weights.reshape(design_rows)
        super().__init__(design)

    def whiten(self, X):
        """ Whitener for WLS model, multiplies by sqrt(self.weights) """
        X = np.asarray(X, np.float64)
        if X.ndim == 1:
            return X * np.sqrt(self.weights)
        elif X.ndim == 2:
            c = np.sqrt(self.weights)
            v = np.zeros(X.shape, np.float64)
            for i in range(X.shape[1]):
                v[:, i] = X[:, i] * c
            return v
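# --- Editor's note: a regression-test sketch (not part of nipy) ---
# With unit weights the WLS whitening is a no-op, so WLSModel should
# reproduce the OLSModel fit exactly:
#
#   import numpy as np
#   from nipy.algorithms.statistics.models.regression import (
#       OLSModel, WLSModel)
#
#   rng = np.random.RandomState(5)
#   X = np.column_stack([rng.standard_normal(25), np.ones(25)])
#   Y = rng.standard_normal(25)
#   assert np.allclose(OLSModel(X).fit(Y).theta,
#                      WLSModel(X, weights=1).fit(Y).theta)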
class RegressionResults(LikelihoodModelResults):
    """ This class summarizes the fit of a linear regression model.

    It handles the output of contrasts, estimates of covariance, etc.
    """

    def __init__(self, theta, Y, model, wY, wresid, cov=None, dispersion=1.,
                 nuisance=None):
        """See LikelihoodModelResults constructor.

        The only difference is that the whitened Y and residual values are
        stored for a regression model.
        """
        LikelihoodModelResults.__init__(self, theta, Y, model, cov,
                                        dispersion, nuisance)
        self.wY = wY
        self.wresid = wresid

    @cached_property
    def resid(self):
        """ Residuals from the fit. """
        return self.Y - self.predicted

    @cached_property
    def norm_resid(self):
        """ Residuals, normalized to have unit length.

        Notes
        -----
        Is this supposed to return "standardized residuals," residuals
        standardized to have mean zero and approximately unit variance?

        d_i = e_i / sqrt(MS_E)

        Where MS_E = SSE / (n - k)

        See: Montgomery and Peck 3.2.1 p. 68
             Davidson and MacKinnon 15.2 p 662
        """
        return self.resid * pos_recipr(np.sqrt(self.dispersion))

    @cached_property
    def predicted(self):
        """ Return linear predictor values from a design matrix. """
        beta = self.theta
        # the LikelihoodModelResults has parameters named 'theta'
        X = self.model.design
        return np.dot(X, beta)

    @cached_property
    def R2_adj(self):
        """ Return the adjusted R^2 value for each row of the response Y.

        Notes
        -----
        Changed to the textbook definition of R^2.

        See: Davidson and MacKinnon p 74
        """
        if not self.model.has_intercept:
            warnings.warn("model does not have intercept term, "
                          "SST inappropriate")
        d = 1. - self.R2
        d *= ((self.df_total - 1.) / self.df_resid)
        return 1 - d

    @cached_property
    def R2(self):
        """ Return the R^2 value for each row of the response Y.

        Notes
        -----
        Changed to the textbook definition of R^2.

        See: Davidson and MacKinnon p 74
        """
        d = self.SSE / self.SST
        return 1 - d

    @cached_property
    def SST(self):
        """ Total sum of squares.  If not from an OLS model this is
        "pseudo"-SST.
        """
        if not self.model.has_intercept:
            warnings.warn("model does not have intercept term, "
                          "SST inappropriate")
        return ((self.wY - self.wY.mean(0)) ** 2).sum(0)

    @cached_property
    def SSE(self):
        """ Error sum of squares.  If not from an OLS model this is
        "pseudo"-SSE.
        """
        return (self.wresid ** 2).sum(0)

    @cached_property
    def SSR(self):
        """ Regression sum of squares """
        return self.SST - self.SSE

    @cached_property
    def MSR(self):
        """ Mean square (regression) """
        return self.SSR / (self.df_model - 1)

    @cached_property
    def MSE(self):
        """ Mean square (error) """
        return self.SSE / self.df_resid

    @cached_property
    def MST(self):
        """ Mean square (total) """
        return self.SST / (self.df_total - 1)

    @cached_property
    def F_overall(self):
        """ Overall goodness of fit F test, comparing model to a model with
        just an intercept.  If not an OLS model this is a pseudo-F.
        """
        F = self.MSR / self.MSE
        Fp = stats.f.sf(F, self.df_model - 1, self.df_resid)
        return {'F': F, 'p_value': Fp, 'df_num': self.df_model-1,
                'df_den': self.df_resid}


class GLSModel(OLSModel):
    """ Generalized least squares model with a general covariance structure
    """

    def __init__(self, design, sigma):
        self.cholsigmainv = npl.cholesky(npl.pinv(sigma)).T
        super().__init__(design)

    def whiten(self, Y):
        return np.dot(self.cholsigmainv, Y)


def isestimable(C, D):
    """ True if (Q, P) contrast `C` is estimable for (N, P) design `D`

    From a Q x P contrast matrix `C` and an N x P design matrix `D`, checks
    if the contrast `C` is estimable by looking at the rank of
    ``vstack([C,D])`` and verifying it is the same as the rank of `D`.

    Parameters
    ----------
    C : (Q, P) array-like
        contrast matrix.  If `C` is 1 dimensional, assume shape (1, P)
    D : (N, P) array-like
        design matrix

    Returns
    -------
    tf : bool
        True if the contrast `C` is estimable on design `D`

    Examples
    --------
    >>> D = np.array([[1, 1, 1, 0, 0, 0],
    ...               [0, 0, 0, 1, 1, 1],
    ...               [1, 1, 1, 1, 1, 1]]).T
    >>> isestimable([1, 0, 0], D)
    False
    >>> isestimable([1, -1, 0], D)
    True
    """
    C = np.asarray(C)
    D = np.asarray(D)
    if C.ndim == 1:
        C = C[None, :]
    if C.shape[1] != D.shape[1]:
        raise ValueError('Contrast should have %d columns' % D.shape[1])
    new = np.vstack([C, D])
    return matrix_rank(new) == matrix_rank(D)
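# --- Editor's note: a sanity-check sketch (not part of nipy) ---
# With an identity covariance the Cholesky whitener in GLSModel is the
# identity, so the fit should coincide with plain OLS:
#
#   import numpy as np
#   from nipy.algorithms.statistics.models.regression import (
#       GLSModel, OLSModel)
#
#   rng = np.random.RandomState(11)
#   X = np.column_stack([rng.standard_normal(20), np.ones(20)])
#   Y = rng.standard_normal(20)
#   assert np.allclose(GLSModel(X, np.eye(20)).fit(Y).theta,
#                      OLSModel(X).fit(Y).theta)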
nipy-0.6.1/nipy/algorithms/statistics/models/tests/000077500000000000000000000000001470056100100224365ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/models/tests/__init__.py000066400000000000000000000000001470056100100245355ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/models/tests/exampledata.py000066400000000000000000000004771470056100100253050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os

import numpy as np

filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "test_data.bin")
data = np.fromfile(filename, "<f8")
data.shape = (126, 15)
y = data[:, 0]
x = data[:, 1:]

    if c.ndim > 1:
        df[n] = c.shape[0]
        SS[n] = np.dot(cbeta, np.dot(np.linalg.pinv(cov_cbeta), cbeta))
        MS[n] = SS[n] / df[n]
        F[n] = MS[n] / sigmasq
    else:
        df[n] = 1
        SS[n] = (cbeta**2).sum() / cov_cbeta
        MS[n] = SS[n] / df[n]
        F[n] = MS[n] / sigmasq
    p[n] = scipy.stats.f.sf(F[n], df[n], df_resid)

routput = \
"""
Output of R:
-----------

> anova(lm(Days~Duration*Weight, X))
Analysis of Variance Table

Response: Days
                Df  Sum Sq Mean Sq F value    Pr(>F)
Duration         1  209.07  209.07  7.2147  0.009587 **
Weight           2  760.43  380.22 13.1210 2.269e-05 ***
Duration:Weight  2  109.03   54.52  1.8813  0.162240
Residuals       54 1564.80   28.98
---
"""


def test_Ragreement():
    # This code would fit the two-way ANOVA model in R
    # X = read.table('http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/kidney.table', header=T)
    # names(X)
    # X$Duration = factor(X$Duration)
    # X$Weight = factor(X$Weight)
    # lm(Days~Duration*Weight, X)
    # A = anova(lm(Days~Duration*Weight, X))
    # rA = rpy.r('A')
    rA = {'Df': [1, 2, 2, 54],
          'F value': [7.2147239263803673, 13.120973926380339,
                      1.8813266871165633, np.nan],
          'Mean Sq': [209.06666666666663, 380.21666666666584,
                      54.51666666666663, 28.977777777777778],
          'Pr(>F)': [0.0095871255601553771, 2.2687781292164585e-05,
                     0.16224035152442268, np.nan],
          'Sum Sq': [209.06666666666663, 760.43333333333169,
                     109.03333333333326, 1564.8]}
    # rn = rpy.r('rownames(A)')
    rn = ['Duration', 'Weight', 'Duration:Weight', 'Residuals']
    pairs = [(rn.index('Duration'), 'Duration'),
             (rn.index('Weight'), 'Weight'),
             (rn.index('Duration:Weight'), 'Interaction')]
    for i, j in pairs:
        assert_almost_equal(F[j], rA['F value'][i])
        assert_almost_equal(p[j], rA['Pr(>F)'][i])
        assert_almost_equal(MS[j], rA['Mean Sq'][i])
        assert_almost_equal(df[j], rA['Df'][i])
        assert_almost_equal(SS[j], rA['Sum Sq'][i])


def test_scipy_stats():
    # Using scipy.stats.models
    X, cons = twoway.design(D, contrasts=contrasts)
    Y = D['Days']
    m = OLSModel(X)
    f = m.fit(Y)
    F_m = {}
    df_m = {}
    p_m = {}
    for n, c in cons.items():
        r = f.Fcontrast(c)
        F_m[n] = r.F
        df_m[n] = r.df_num
        p_m[n] = scipy.stats.f.sf(F_m[n], df_m[n], r.df_den)
        assert_almost_equal(F[n], F_m[n])
        assert_almost_equal(df[n], df_m[n])
        assert_almost_equal(p[n], p_m[n])
nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_ar.py000066400000000000000000000015701470056100100244540ustar00rootroot00000000000000# emacs:
-*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .. import regression from .exampledata import x, y # FIXME: This test does not test any values # TODO: spend an hour or so to create a test like test_ols.py # with R's output, the script and the data used for the script # # Although, it should be said that this, in R # x = as.matrix(read_table('x.csv')) # y = as.matrix(read_table('y.csv')) # res = arima(y, xreg=x, order=c(2,0,0)) # # gives an error ``system is computationally singular`` def test_armodel(): for i in range(1,4): model = regression.ARModel(x, i) for i in range(20): results = model.fit(y) rho, sigma = regression.yule_walker(y - results.predicted) model = regression.ARModel(model.design, rho) print("AR coefficients:", model.rho) nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_data.bin000066400000000000000000000354201470056100100251040ustar00rootroot00000000000000R3o@jsXSag7{ᘿz̰?Z?+T tLd hJ?e +p@ҍRk]?PE#PF@?r҉KX?n=DKZ5CA?dC7@V.jp@Tǩ @:r!0p@jT7`?$~]W?]f[?Va@M m;?dC7@V.jp0@TǩP@Kp@ _EEJS@&!!b?!V?{cP_:?s'Y?u%G ~:?"S@![x~B@ʔ'_l@DRp@;f(pq u?v=?T?Ў?SN]Y? Q1;Bll#?dC7 @V.jpP@Tǩ@sC[shp@kЭll0'r?(w9?>;?@PV?6Bc9C2Dbu%?E$@f.)Y@JبE@喩1iAp@*rKcySi?47akq?O?Y[q?;S? I,?@G??f,>2@zfGt@eIP@QDp@ dJXnΣ?ͽ^1`?u??PFvS?3#@@n>?E4@f.)y@JبE@5KS?Co@k>?"S8@![x~@ʔ'_@&eHp@=YӽΒ??ޑ$?J T?6t> H4-< ??B[ΥZ:@-s=@m@au!@?,#8p@F-Xʩ0?@{3?h?3V?HMqAxӴ+"??oض(a<@Q#,@6zS@u,p@fFe2?$]hX$?+JR?x.?&n@Ʒ %??Uh>@tT@O\u@bz :p@0Vx?o?*/!?ſKDT? U?Em}c€#??dC7@@V.jp@Tǩ@o&p@uOW9j*?f?rт?/PR?AEaB@zfGΔ@eIP@ 7p@=֊`?kŃ?2GA?8yQ?G\ElPZ"??'KBC@X܆.@M@+JBeHp@܃?oxԢ?ʞ:?B a?_e< jZqƬRZ@V?v?f' N ??B[ΥZJ@-s=@m@au!ALRN8p@Z b*?\J@?`~/PU?O =s?ٙB5^K@zISh@AEF՜Kp@ Fq'WN=?EyTؔ?ČwFbgCW?xNWD`.ҏ=??oض(aL@Q#,@6zSAݛ .p@MQɶ{.]?|OR?4#,Rl,1W?jݗ-;`Z?+j0eM@U@A^-p@ QǿbcE-&՘?/O}q,']^ŞV?T\ám=b ?UhN@tT@O\u A'2;p@Yڠ\ȔD#?h5=k k&?#V? lPCY[?2*lO@yOۮ@#MuLAd'+p@WxrBJ!? R@zfGδ@eIPAZ ֣@q~|`p@꾤(zs>`??w}?!F?{i7 k*6?܅:R@K@wkAzBؔ@Z?5p@;ၒcžn6|`?@8j.r?F%:B?lz@*C$(2?'KBS@X܆.@MA{N@sN5Gp@<2!j?x*KT(w?#ܽ8?J==?fusB4&?rZ| S@!Ej@v8*AqL_*@RعDp@'z"zE]F?)?7wy?ccF?jWe@F*-$?ET@f.)@JبE A&|X@1Jp@wEySH3 jEDh?a+?~\!B?$contXC[?5@V@1'A@*ga+'A?q@PW@`&@چ(A#ߖr@Ln!Q2Fp@ h`5#p{PS?{r{t?NKHD?˫>iCBt C"?˾+W@[bdA@~/e*AH@Qb۠Fp@jDF8Tlf.H?"^#?l_u?ƄI?@$`F-L?"SX@![x~@ʔ'_,AX#H @R:p@\ZY* Cn?nL,?xO?Qy`I? IOEPd8??a|X@BlE@-A@T"p@<7ИLRZ?IzV?$??~E0?Zd;WY@,:@| ƹS/A$r|w@%%p@bs?]3Uo0Ybjx?to6?->I ͗6>?;Y@8@4*0AS@M%T">p@ tLd? N5doLR?Lb̂?NJBG?c2J'4־?B[ΥZZ@-s=@m@au!1AX@`(p@LM#㣙G@>Pa7/k:?.)G2u?$[@f H@FvĹ&5Aw0)@^=p@r\lm='q?LxC)?@?Y CP}d\?oض(a\@Q#,@6zS6AA}s@VAp@ =hȊT?rc?T5HU,?XiGŠ@2xvv>?pq\@?tW@˫)7A/pܹA%Lp@N܂p)?@BwJnˆ?/喿&?1&0eٰw??+j0e]@U@8A''AS6?p@+M*K؆,U?DѼ@{ @7AdnunT6{#&??P6 ]@!@aws:A|ɧAO{F1p@_uf?|B#y?47Ρ?D}1D11a??Uh^@tT@O\u;A"'A _A6#07p@S~\q{7`8tzx»?Cu*7MT`6^?tYLl^@r`@@f*@i#M?RT?pf'*f 8\I(#?2*l_@yO@#MuL>A2[ Ap@O/ g˖!c?ps?^q,4eCr7O*)?}͍_@-T@ WO?Ae.?آ*? gx`@0Z@ϠtAA\%J@[_*p@jcB=fB%fän c?H銿A{^0a+}h(>2?`@Jƍ{@RY׭RFBAR%`h@N0p@'/=pFgOL/#q?0(* 5ø[|A:;+0#?U[r`@yh?4@#W6CAԜ?}@w&p@Jo=>r^GuOF?e2m*B+\z?'XQ;a@O7@u0CAx@o-p@삟SPj*?,ns(#^>1֕?70|a@Z@ȩDA~L4@$p@ˎ0gSٜW䫽`ADŽ`Q*bҜ>zE)'?EGra@.{@396EA@tp@*O mIM$ }V-iV|/Έ? :͍,?VIa@7QZ;@ r)FA$D@]8Up@ p)Kғ 5K9Y65oU3\:'?f,>b@zfG@eIPGAZ ֣@3;+l5p@T@'|_tj'4Meb=E3ڥ <3hw-?6vb@y0Bc@VHA1Je3@Qݣ]3p@=K)mP^JYbpOJ8H(`\_MgNܔF3?܅:b@K@wkIAzB@KT5p@B»x='ٱRRZ?ePv%6 jgu5)a4?Clc@RH2a@aJA&@c63p@G!t=#Z 6?<<6hMxp 0I8܇q)2"pZ)?'KBc@X܆.@MKA{N@/7f[7p@FyF#=;@7p@Uh'/@(eS@T? J\;/<|98䱲Q?6&řI#?Ed@f.)@JبEPA&|X@S7?p@ow)@~̈A:$?Zƨ=D߀?%?cqmd@j]U@@(PPAPI-@{uE%p@]l^MMa[z?|*l8D>L,0P?\ ?Ͻd@Р@Z˅AQAV/m7K@)p@](KkSV9`u |R+-?5?Xɩ-?@ .?,ge@@\7F>,RA@#p@FK}^ Nk^t 0Ԝol_ni;d@ o$? 
Qz#?S"^FIe@|KQ@-nRAۣw@f cp@Qo([fXu I^B' Υq*A&K:e?.O98?1%e@l~K@pE˸SAϰ|X?@f#p@}tڵҫ`4inps ٱ{;MrELD I"(Y<?ACe@jf,@/(7TAaq&M@ȕV#p@!eUq;\΀:5"?աD B?`Lf@C@׭UA%w@@ 9/p@P I x)հ;WH_8^^H(%?Ʈ&rB?pZf@@@e@X#gVA_ AP4p@Fk9沁iR8`T0E?fVFgA QЗdt ?>t@?5@f@1'A@*ga+WA-%?סQ??ڏag@/ /#@)ELWA ʇAغu@p@,Aõxܰ?Pk>PJ+0?8=7?q@Pg@`&@چXA#ߖrA_SMDp@BdD vʟ vm+?XC FZ8PV_a)'4?Di<?%1 g@|s0[@6~YAT<8= AX9=p@dX!걿^} i?P˪/uƄƶTÎ8?[ƥ<?˾+g@[bdA@~/eZAH AQ5p@¯ێ׾t2:?ʇ%?;'m!W86?CCI<?pΈh@$\Y@[&?[A1??O-0p@vL9? _?P SU"7?)D A2?"Sh@![x~@ʔ'_\Ao1@33p@^L2+ Fve?iR_)?m?<,SDm1?\ ;?Bsh@QDU`@aW]A- Y@=a p@,Ӊf32xBPYen&RjRg#,4? kb@?a|h@BlE@]Ai?r@ӕ p@@Z7`׃oe:SÖ[J)?@? \i@H~@f[^A }G@m.Y)p@ T\=0mx٬2 uWꍿh!~FX!R|cZk/?l_r2?Zd;Wi@,:@| ƹS_AG@G%&p@T+]nx0e{j0)΋d->GV\4 6?T6.7?R,i@ K)|x@VGn_`An74@ߺ p@738k7ǽjY  \b?? MkK??P6 m@!@awsjA˱@ b/5p@ o='?"q&Rֲo`YZu c?P??EB['n@lMoj@YojAclm'@G9p@dy=OXP? .6_PaЉמ+]Y ` d^?XRX??Uhn@tT@O\ukAsAb.p@cv= ?ͫ+Տp;q5VCU[?\^??Aen@h ja@&lAr{AYԇFS(p@Nk<=M aL?H^нx۳)?ӛ\{aC`*8Ul[?`ՕTZ??tYLln@r`@@f*lA#Am?p@9llh @n?V0n񉛥? P\zS :Q+X?J9\??K+o@t\@(ǑOmAvABmp@mw5?0GDlr~)?J&<`" Y?d;j[??2*lo@yO@#MuLnA4j A2q.G&p@ C$< ?bIė2??f4] iX?"c_??ףp= o@8ōZ@+iká oACD Anipy-0.6.1/nipy/algorithms/statistics/models/tests/test_estimable.py000066400000000000000000000022621470056100100260160ustar00rootroot00000000000000""" Testing ``isestimable`` in regression module """ import numpy as np import pytest from ..regression import isestimable def test_estimable(): rng = np.random.RandomState(20120713) N, P = (40, 10) X = rng.normal(size=(N, P)) C = rng.normal(size=(1, P)) assert isestimable(C, X) assert isestimable(np.eye(P), X) for row in np.eye(P): assert isestimable(row, X) X = np.ones((40, 2)) assert isestimable([1, 1], X) assert not isestimable([1, 0], X) assert not isestimable([0, 1], X) assert not isestimable(np.eye(2), X) halfX = rng.normal(size=(N, 5)) X = np.hstack([halfX, halfX]) assert not isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X) assert not isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X) assert isestimable(np.hstack([np.eye(5), np.eye(5)]), X) # Test array-like for design XL = X.tolist() assert isestimable(np.hstack([np.eye(5), np.eye(5)]), XL) # Test ValueError for incorrect number of columns X = rng.normal(size=(N, 5)) for n in range(1, 4): pytest.raises(ValueError, isestimable, np.ones((n,)), X) pytest.raises(ValueError, isestimable, np.eye(4), X) nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_glm.py000066400000000000000000000020531470056100100246260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for models.GLM """ import numpy as np import pytest from .. 
import family from ..glm import Model as GLM @pytest.fixture def x_y(): rng = np.random.RandomState(20110928) X = rng.standard_normal((40,10)) Y = rng.standard_normal((40,)) Y = np.greater(Y, 0) return {'X': X, 'Y': Y} def test_Logistic(x_y): X = x_y['X'] Y = x_y['Y'] cmodel = GLM(design=X, family=family.Binomial()) results = cmodel.fit(Y) assert results.df_resid == 30 def test_cont(x_y): # Test continue function works as expected X = x_y['X'] Y = x_y['Y'] cmodel = GLM(design=X, family=family.Binomial()) cmodel.fit(Y) assert cmodel.cont(0) assert not cmodel.cont(np.inf) def test_Logisticdegenerate(x_y): X = x_y['X'].copy() X[:,0] = X[:,1] + X[:,2] Y = x_y['Y'] cmodel = GLM(design=X, family=family.Binomial()) results = cmodel.fit(Y) assert results.df_resid == 31 nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_model.py000066400000000000000000000076401470056100100251560ustar00rootroot00000000000000""" Testing models module """ import numpy as np import pytest from numpy.testing import assert_array_almost_equal # In fact we're testing methods defined in model from ..regression import OLSModel N = 10 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] Y = np.r_[:5, 1:6] MODEL = OLSModel(X) RESULTS = MODEL.fit(Y) """ R script :: X = cbind(0:9 * 2/9 -1, 1) Y = as.matrix(c(0:4, 1:5)) results = lm(Y ~ X-1) print(results) print(summary(results)) gives:: Call: lm(formula = Y ~ X - 1) Coefficients: X1 X2 1.773 2.500 Residuals: Min 1Q Median 3Q Max -1.6970 -0.6667 0.0000 0.6667 1.6970 Coefficients: Estimate Std. Error t value Pr(>|t|) X1 1.7727 0.5455 3.250 0.0117 * X2 2.5000 0.3482 7.181 9.42e-05 *** --- Residual standard error: 1.101 on 8 degrees of freedom Multiple R-squared: 0.8859, Adjusted R-squared: 0.8574 F-statistic: 31.06 on 2 and 8 DF, p-value: 0.0001694 """ def test_model(): # Check basics about the model fit # Check we fit the mean assert_array_almost_equal(RESULTS.theta[1], np.mean(Y)) # Check we get the same as R assert_array_almost_equal(RESULTS.theta, [1.773, 2.5], 3) pcts = np.percentile(RESULTS.resid, [0,25,50,75,100]) assert_array_almost_equal(pcts, [-1.6970, -0.6667, 0, 0.6667, 1.6970], 4) def test_t_contrast(): # Test individual t against R assert_array_almost_equal(RESULTS.t(0), 3.25) assert_array_almost_equal(RESULTS.t(1), 7.181, 3) # And contrast assert_array_almost_equal(RESULTS.Tcontrast([1,0]).t, 3.25) assert_array_almost_equal(RESULTS.Tcontrast([0,1]).t, 7.181, 3) # Input matrix checked for size pytest.raises(ValueError, RESULTS.Tcontrast, [1]) pytest.raises(ValueError, RESULTS.Tcontrast, [1, 0, 0]) # And shape pytest.raises(ValueError, RESULTS.Tcontrast, np.array([1, 0])[:,None]) def test_t_output(): # Check we get required outputs exp_t = RESULTS.t(0) exp_effect = RESULTS.theta[0] exp_sd = exp_effect / exp_t res = RESULTS.Tcontrast([1,0]) assert_array_almost_equal(res.t, exp_t) assert_array_almost_equal(res.effect, exp_effect) assert_array_almost_equal(res.sd, exp_sd) res = RESULTS.Tcontrast([1,0], store=('effect',)) assert res.t == None assert_array_almost_equal(res.effect, exp_effect) assert res.sd == None res = RESULTS.Tcontrast([1,0], store=('t',)) assert_array_almost_equal(res.t, exp_t) assert res.effect == None assert res.sd == None res = RESULTS.Tcontrast([1,0], store=('sd',)) assert res.t == None assert res.effect == None assert_array_almost_equal(res.sd, exp_sd) res = RESULTS.Tcontrast([1,0], store=('effect', 'sd')) assert res.t == None assert_array_almost_equal(res.effect, exp_effect) assert_array_almost_equal(res.sd, exp_sd) def test_f_output(): # Test 
f_output res = RESULTS.Fcontrast([1,0]) exp_f = RESULTS.t(0) ** 2 assert_array_almost_equal(exp_f, res.F) # Test arrays work as well as lists res = RESULTS.Fcontrast(np.array([1,0])) assert_array_almost_equal(exp_f, res.F) # Test with matrix against R res = RESULTS.Fcontrast(np.eye(2)) assert_array_almost_equal(31.06, res.F, 2) # Input matrix checked for size pytest.raises(ValueError, RESULTS.Fcontrast, [1]) pytest.raises(ValueError, RESULTS.Fcontrast, [1, 0, 0]) # And shape pytest.raises(ValueError, RESULTS.Fcontrast, np.array([1, 0])[:,None]) def test_f_output_new_api(): res = RESULTS.Fcontrast([1, 0]) assert_array_almost_equal(res.effect, RESULTS.theta[0]) assert_array_almost_equal(res.covariance, RESULTS.vcov()[0][0]) def test_conf_int(): lower_, upper_ = RESULTS.conf_int() assert (lower_ < upper_).all() assert (lower_ > upper_ - 10).all() lower_, upper_ = RESULTS.conf_int(cols=[1]).T assert lower_ < upper_ assert lower_ > upper_ - 10 nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_olsR.py000066400000000000000000001554671470056100100250100ustar00rootroot00000000000000 import numpy as np import scipy.stats from numpy.testing import assert_almost_equal, assert_array_almost_equal import nipy.testing as niptest from ..regression import OLSModel from .exampledata import x, y Rscript = ''' d = read.table('data.csv', header=T, sep=' ') y.lm = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14, data=d) print(summary(y.lm)) y.lm2 = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 - 1, data=d) print(summary(y.lm2)) SSE = sum(resid(y.lm)^2) SST = sum((d$Y - mean(d$Y))^2) SSR = SST - SSE print(data.frame(SSE, SST, SSR)) MSE = SSE / y.lm$df.resid MST = SST / (length(d$Y) - 1) MSR = SSR / (length(d$Y) - y.lm$df.resid - 1) print(data.frame(MSE, MST, MSR)) print(AIC(y.lm)) print(AIC(y.lm2)) ''' # lines about "Signif. codes" were deleted due to a character encoding issue Rresults = \ """ These are the results from fitting the model in R, i.e. running the commands Rscript in R A few things to note, X8 is a column of 1s, so by not including a '-1' in the formula, X8 gets thrown out of the model, with its coefficients being the "(Intercept)" term. An alternative is to use "-1" in the formula, but then R gives nonsensical F, R2 and adjusted R2 values. This means that R2, R2a and F cannot fully be trusted in R. In OLSModel, we have checked whether a column of 1s is in the column space, in which case the F, R2, and R2a are seneible. > source('test.R') [1] "Without using '-1'" [1] "------------------" Call: lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14, data = d) Residuals: Min 1Q Median 3Q Max -2.125783 -0.567850 0.004305 0.532145 2.372263 Coefficients: (1 not defined because of singularities) Estimate Std. 
Error t value Pr(>|t|) (Intercept) 2.603e+02 8.226e-01 316.463 < 2e-16 *** X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 6.42e-12 *** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 * X7 1.339e+03 8.418e+02 1.591 0.1145 X8 NA NA NA NA X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 --- Residual standard error: 0.8019 on 112 degrees of freedom Multiple R-squared: 0.5737,Adjusted R-squared: 0.5242 F-statistic: 11.59 on 13 and 112 DF, p-value: 1.818e-15 [1] "Using '-1'" [1] "------------------" Call: lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 - 1, data = d) Residuals: Min 1Q Median 3Q Max -2.125783 -0.567850 0.004305 0.532145 2.372263 Coefficients: Estimate Std. Error t value Pr(>|t|) X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 6.42e-12 *** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 * X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 < 2e-16 *** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 --- Residual standard error: 0.8019 on 112 degrees of freedom Multiple R-squared: 1,Adjusted R-squared: 1 F-statistic: 9.399e+05 on 14 and 112 DF, p-value: < 2.2e-16 SSE SST SSR 1 72.02328 168.9401 96.91685 MSE MST MSR 1 0.643065 1.351521 7.455142 [1] "AIC" [1] 317.1017 [1] "BIC" [1] 359.6459 """ def test_results(): m = OLSModel(x) r = m.fit(y) # results hand compared with R's printout assert f'{r.R2:0.4f}' == '0.5737' assert f'{r.R2_adj:0.4f}' == '0.5242' f = r.F_overall assert f"{f['F']:0.2f}" == '11.59' assert f['df_num'] == 13 assert f['df_den'] == 112 assert f"{f['p_value']:0.3e}" == '1.818e-15' # test Fcontrast, the 8th column of m.design is all 1s # let's construct a contrast matrix that tests everything # but column 8 is zero M = np.identity(14) M = np.array([M[i] for i in [0,1,2,3,4,5,6,8,9,10,11,12,13]]) Fc = r.Fcontrast(M) assert_array_almost_equal([Fc.F], [f['F']], 6) assert_array_almost_equal([Fc.df_num], [f['df_num']], 6) assert_array_almost_equal([Fc.df_den], [f['df_den']], 6) thetas = [] sds = [] ts = [] ps = [] # the model has an intercept assert r.model.has_intercept # design matrix has full rank assert r.model.rank == 14 # design matrix has full rank assert r.df_model == 14 assert r.df_total == 126 assert r.df_resid == 112 # entries with '*****' are not tested as they were a different format resultstr = \ ''' X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 ****** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 ****** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 ****** X4 
3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 ****** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 ''' for row in resultstr.strip().split('\n'): row = row.strip() _, th, sd, t, p = row.split() thetas.append(th) sds.append(sd) ts.append(t) ps.append(p) for th, thstr in zip(r.theta, thetas): assert f'{th:0.3e}' == thstr for sd, sdstr in zip([np.sqrt(r.vcov(column=i)) for i in range(14)], sds): assert f'{sd:0.3e}' == sdstr for t, tstr in zip([r.t(column=i) for i in range(14)], ts): assert f'{t:0.3f}' == tstr for i, t in enumerate([r.t(column=i) for i in range(14)]): m = np.zeros((14,)) m[i] = 1. tv = r.Tcontrast(m) e = r.theta[i] sd = np.sqrt(r.vcov(column=i)) assert_almost_equal(tv.t, t, 6) assert_almost_equal(tv.sd, sd, 6) assert_almost_equal(tv.effect, e, 6) for p, pstr in zip([2*scipy.stats.t.sf(np.fabs(r.t(column=i)), r.df_resid) for i in range(14)], ps): if pstr.find('*') < 0: assert f'{p:0.4f}' == pstr assert f"{r.SSE:0.5f}" == "72.02328" assert f"{r.SST:0.4f}" == "168.9401" assert f"{r.SSR:0.5f}" == "96.91685" assert f"{r.MSE:0.6f}" == "0.643065" assert f"{r.MST:0.6f}" == "1.351521" assert f"{r.MSR:0.6f}" == "7.455142" assert f"{np.sqrt(r.MSE):0.4f}" == "0.8019" # the difference here comes from the fact that # we've treated sigma as a nuisance parameter, # so our AIC is the AIC of the profiled log-likelihood... assert f'{r.AIC + 2:0.4f}' == '317.1017' assert f'{r.BIC + np.log(126):0.4f}' == '359.6459' # this is the file "data.csv" referred to in Rscript above Rdata = ''' Y X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 2.558020266818153345e+02 -4.423009200784273898e-02 -6.615177603161188392e-03 -2.429792163411158279e-02 4.236447886547620167e-02 1.618533936246031348e-03 -8.683269025079367589e-04 -8.181821468255191711e-04 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.573856564029982792e+02 -1.247753847628743987e-02 8.132393396825286086e-03 -4.413603363412710312e-02 3.174380286547619917e-02 1.507591026246031356e-03 -8.321096135079367661e-04 -5.268108768253958792e-04 1.000000000000000000e+00 2.027260000000000062e+00 4.109783107600000207e+00 8.331598902713176713e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590080857852332201e+02 -3.265906165554512651e-03 1.963457496825285822e-03 -1.398771363412710383e-02 3.088127086547619998e-02 1.672285950246031301e-03 -8.927174265079367271e-04 -4.244701868253958994e-04 1.000000000000000000e+00 4.054520000000000124e+00 1.643913243040000083e+01 6.665279122170541370e+01 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607408786477914759e+02 -8.017150588157394330e-04 2.213062996825285525e-03 1.398740365872893493e-03 1.085352386547620146e-02 1.533498042246031435e-03 -7.043727325079367782e-04 -4.042463468253959091e-04 1.000000000000000000e+00 6.081780000000000186e+00 3.698804796840000364e+01 2.249531703732557730e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.611418084786566283e+02 -1.861685769802005528e-04 1.047713639682528591e-02 1.167152736587289547e-02 1.489745686547620102e-02 1.548124779246031315e-03 
-5.563730125079367241e-04 -1.481969968253959513e-04 1.000000000000000000e+00 8.109040000000000248e+00 6.575652972160000331e+01 5.332223297736433096e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.625281634787599501e+02 -4.117603177916723598e-05 9.983357396825286167e-03 2.268076636587289252e-02 3.341529466547620009e-02 1.378939226246031274e-03 -5.824833125079368051e-04 -1.637155968253958946e-04 1.000000000000000000e+00 1.013630000000000031e+01 1.027445776899999998e+02 1.041449862839147045e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600881821274363688e+02 -8.724125662125817594e-06 2.118458339682528432e-02 -3.638986341271063796e-04 7.819901865476201752e-03 1.343526296246031447e-03 -4.266495825079367706e-04 -3.036430682539588335e-05 1.000000000000000000e+00 1.216356000000000037e+01 1.479521918736000146e+02 1.799625362986046184e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604916986023719687e+02 -1.779095604735100062e-06 2.110365339682528443e-02 -1.333419963412710470e-02 3.556263356547620380e-02 1.176156066246031480e-03 -2.915726925079367704e-04 -1.372058068253959344e-04 1.000000000000000000e+00 1.419082000000000043e+01 2.013793722724000190e+02 2.857738423630619764e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.631421465595319091e+02 -3.505829544571274576e-07 3.057060839682528355e-02 2.450720636587289808e-02 2.371273386547620085e-02 1.109560806246031196e-03 -4.451344925079367475e-04 -4.868320682539588849e-05 1.000000000000000000e+00 1.621808000000000050e+01 2.630261188864000133e+02 4.265778638189146477e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607404475404462687e+02 -6.698859808659203534e-08 4.212096239682527887e-02 4.201216436587289910e-02 1.535293186547620134e-02 1.200805636246031222e-03 -4.756955025079367830e-04 4.163935317460414412e-05 1.000000000000000000e+00 1.824533999999999878e+01 3.328924317155999688e+02 6.073735600077903655e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602563139919928403e+02 -1.244731173797263160e-08 3.868433239682528280e-02 3.198940136587289512e-02 1.951312986547620171e-02 1.210561816246031458e-03 -5.037184525079367245e-04 1.853174317460412092e-05 1.000000000000000000e+00 2.027260000000000062e+01 4.109783107599999994e+02 8.331598902713176358e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.597932184819765098e+02 -2.254732652415686417e-09 3.464322639682528016e-02 2.498494136587289804e-02 6.040923865476201249e-03 1.251570966246031346e-03 -3.408492325079367884e-04 -2.053166825395852726e-06 1.000000000000000000e+00 2.229986000000000246e+01 4.972837560196001050e+02 1.108935813951124146e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600692996257253071e+02 -3.990740854251533582e-10 3.209237439682528781e-02 1.811942636587289546e-02 2.605920586547620307e-02 1.177732906246031254e-03 -5.077881225079367488e-04 5.365363174604119087e-06 1.000000000000000000e+00 2.432712000000000074e+01 5.918087674944000582e+02 1.439700290388836947e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.605557611538409901e+02 -6.912161668563663771e-11 4.299601339682528056e-02 2.895994436587289583e-02 1.417107986547620074e-02 1.265060666246031361e-03 -7.339628625079367124e-04 1.238756831746040893e-04 1.000000000000000000e+00 2.635437999999999903e+01 6.945533451843999728e+02 
1.830452278926084546e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.595077391981066626e+02 -1.172812338719269821e-11 3.317149439682529066e-02 1.328090936587289494e-02 1.022893186547620126e-02 1.374031606246031408e-03 -5.220871725079368267e-04 1.413575031746041374e-04 1.000000000000000000e+00 2.838164000000000087e+01 8.055174890896000761e+02 2.286190738904495811e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.587979640652715148e+02 -1.964186707357858839e-12 2.405623739682528558e-02 -1.810522634127103431e-03 1.576445486547620178e-02 1.135956976246031312e-03 -5.014120825079368057e-04 1.611867531746041847e-04 1.000000000000000000e+00 3.040890000000000271e+01 9.247011992100001407e+02 2.811914629665697794e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.596659555937277446e+02 -3.223083090335421760e-13 3.234481339682528100e-02 2.004408536587289763e-02 2.356408786547620204e-02 1.221481986246031413e-03 -6.670757425079366920e-04 1.487958231746040706e-04 1.000000000000000000e+00 3.243616000000000099e+01 1.052104475545600053e+03 3.412622910551317182e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.584320401508220471e+02 -9.003308688382024074e-14 3.619885939682528087e-02 2.789771365872894399e-03 9.189109865476198513e-03 1.135373276246031326e-03 -4.355060825079367357e-04 1.002332231746041503e-04 1.000000000000000000e+00 3.446341999999999928e+01 1.187727318096400040e+03 4.093314540902982844e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.580819809866689525e+02 -3.906123070653587881e-14 3.660551639682528557e-02 -1.860463412710344766e-05 2.714363586547620388e-02 1.120834376246031315e-03 -4.501944025079367639e-04 1.202024331746040682e-04 1.000000000000000000e+00 3.649067999999999756e+01 1.331569726862399875e+03 4.858988480062322924e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.594975650647641601e+02 1.601430181974213516e-14 3.905011839682528962e-02 9.654908365872898190e-03 1.281982286547620267e-02 1.076811816246031270e-03 -6.519448025079367355e-04 1.400206731746040907e-04 1.000000000000000000e+00 3.851794000000000295e+01 1.483631701843600240e+03 5.714643687370968837e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.605247214249999956e+02 9.795389708948228080e-02 3.677422139682529068e-02 2.608958736587289190e-02 2.185457486547620273e-02 1.235064666246031345e-03 -6.071577725079368385e-04 1.763112331746040417e-04 1.000000000000000000e+00 4.054520000000000124e+01 1.643913243039999998e+03 6.665279122170541086e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.615678287570015073e+02 7.636684347997682032e+00 2.837993739682528535e-02 3.336949636587289297e-02 2.712176086547619935e-02 1.121492386246031227e-03 -3.887845825079367800e-04 9.757465317460415049e-05 1.000000000000000000e+00 4.257245999999999952e+01 1.812414350451600058e+03 7.715893743802672543e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.606581922590170848e+02 1.688917484910420086e+01 3.424000439682528540e-02 5.953364365872893665e-03 1.839351286547620187e-02 1.118185646246031353e-03 -3.785339525079367985e-04 2.395393531746040213e-04 1.000000000000000000e+00 4.459972000000000492e+01 1.989135024078400420e+03 8.871486511608993169e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.585156749757550756e+02 
1.131722623416632167e+01 3.749442739682529169e-02 -1.501305634127106381e-03 1.711901486547620296e-02 1.368664136246031289e-03 -5.395318625079368116e-04 1.879513531746040403e-04 1.000000000000000000e+00 4.662698000000000320e+01 2.174075263920400175e+03 1.013705638493112347e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.589431610190735000e+02 1.345714208625528263e+00 3.218309039682527850e-02 -7.129233634127103703e-03 2.217183586547620197e-02 1.429032466246031368e-03 -5.373530925079368203e-04 1.592906031746042046e-04 1.000000000000000000e+00 4.865424000000000149e+01 2.367235069977600233e+03 1.151760232311069558e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588859099636547398e+02 -3.786197907636791982e+00 2.637535539682528754e-02 -1.390411634127106111e-03 1.310852586547620047e-02 1.517677216246031326e-03 -5.291699825079366776e-04 1.052765531746040640e-04 1.000000000000000000e+00 5.068149999999999977e+01 2.568614442250000138e+03 1.301812328548933729e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.584379032107140688e+02 -4.100675927724760861e+00 2.384725139682528430e-02 -1.080336163412710590e-02 -4.173090134523799177e-03 1.358116916246031227e-03 -4.800622625079367331e-04 5.590095317460413646e-05 1.000000000000000000e+00 5.270875999999999806e+01 2.778213380737599891e+03 1.464361823140867637e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.595410206851418025e+02 -2.630373115496683400e+00 1.004822839682528376e-02 9.314062365872892435e-03 -9.878861345237952007e-04 1.325770276246031245e-03 -4.428060525079367620e-04 -2.427069682539584328e-05 1.000000000000000000e+00 5.473602000000000345e+01 2.996031885440400401e+03 1.639908612021034642e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607257898907158165e+02 -1.286200190109046071e+00 2.464792639682528499e-02 2.035648336587289609e-02 -6.855731345237967012e-04 1.419879466246031343e-03 -6.113658025079368383e-04 1.115435631746041455e-04 1.000000000000000000e+00 5.676328000000000173e+01 3.222069956358400304e+03 1.828952591123596649e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588783224743828555e+02 -5.223127938147428262e-01 2.786826139682528278e-02 1.117468365872894415e-03 -1.241363713452380002e-02 1.415631896246031260e-03 -4.147048725079367825e-04 -1.723451682539593396e-05 1.000000000000000000e+00 5.879054000000000002e+01 3.456327593491600055e+03 2.031993656382716435e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588356428472260973e+02 -1.842356108573543483e-01 2.425059939682528559e-02 -4.276288634127104610e-03 -1.091986813452380106e-02 1.392750786246031280e-03 -4.490394525079367555e-04 -1.003586682539589405e-05 1.000000000000000000e+00 6.081780000000000541e+01 3.698804796840000563e+03 2.249531703732558235e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.597484695395635299e+02 -5.807770807625862314e-02 1.325085839682528521e-02 -3.310795634127106785e-03 2.611598386547619999e-02 1.344393666246031368e-03 -5.894356525079367040e-04 -4.194197682539594491e-05 1.000000000000000000e+00 6.284506000000000370e+01 3.949501566403600464e+03 2.482066629107282788e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.586971711680070598e+02 -1.669108953593786623e-02 1.520021739682528641e-02 -6.521448634127104127e-03 1.323596186547620207e-02 
1.018124536246031329e-03 -5.651434125079368188e-04 -1.186629568253958888e-04 1.000000000000000000e+00 6.487232000000000198e+01 4.208417902182400212e+03 2.730098328441053745e+05 6.637850845511725772e-01 -0.000000000000000000e+00 -0.000000000000000000e+00 2.578038305276642745e+02 -4.438179736810902651e-03 1.418104939682528556e-02 -1.458225563412710556e-02 2.076608686547620070e-02 7.166574462460313308e-04 -6.010164225079367385e-04 -2.031235568253959454e-04 1.000000000000000000e+00 6.689958000000000027e+01 4.475553804176400263e+03 2.994126697668033885e+05 2.437840493460591773e+01 -0.000000000000000000e+00 -0.000000000000000000e+00 2.575900012845362994e+02 -1.104415351769467155e-03 1.171448539682528461e-02 -6.411356341271060022e-04 2.179420786547620059e-02 7.711998362460313790e-04 -5.958785525079367436e-04 -1.778974268253958766e-04 1.000000000000000000e+00 6.892683999999999855e+01 4.750909272385600161e+03 3.274651632722386275e+05 1.195928942034693989e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.592522649854503811e+02 -2.595046810460775255e-04 5.653468396825284473e-03 -3.306909634127105230e-03 3.415740386547620050e-02 7.991702162460313699e-04 -5.105784425079367903e-04 -2.023469768253959109e-04 1.000000000000000000e+00 7.095409999999999684e+01 5.034484306809999907e+03 3.572173029538273695e+05 3.362968463074205374e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.579003985729477790e+02 -5.799523371039054791e-05 4.075954396825285861e-03 -5.813851634127106816e-03 3.851734186547620120e-02 8.126851062460313437e-04 -4.455600825079367448e-04 -3.203095468253959032e-04 1.000000000000000000e+00 7.298135999999999513e+01 5.326278907449599501e+03 3.887190784049858339e+05 7.244798546627382620e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.577110562270163996e+02 -1.240153176296573148e-05 1.982903996825284912e-03 -5.751847634127105896e-03 1.817295686547620165e-02 6.980794162460313449e-04 -3.607846825079367298e-04 -3.361090868253959027e-04 1.000000000000000000e+00 7.500862000000000762e+01 5.626293074304400761e+03 4.220204792191306478e+05 1.334131512685706639e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.593279674352351662e+02 -2.549813093163372416e-06 2.012354196825284422e-03 -3.176191634127106811e-03 2.634695186547620152e-02 5.562481362460312394e-04 -4.909143225079367614e-04 -2.835488168253958450e-04 1.000000000000000000e+00 7.703588000000000591e+01 5.934526807374400960e+03 4.571714949896775070e+05 2.215241413792596632e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604506571831263386e+02 -5.061830223553558920e-07 3.248753396825284495e-03 5.653695365872894729e-03 3.363641326547620047e-02 4.461581362460312686e-04 -5.631164925079367844e-04 -1.737951468253959427e-04 1.000000000000000000e+00 7.906314000000000419e+01 6.250980106659601006e+03 4.942221153100429801e+05 3.417799151399689890e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602953723174513812e+02 -9.736609629218024716e-08 6.825325968252849568e-04 1.423937136587289515e-02 3.023103586547620097e-02 7.006392762460313377e-04 -5.004090925079366942e-04 -1.539339168253958537e-04 1.000000000000000000e+00 8.109040000000000248e+01 6.575652972159999990e+03 5.332223297736432869e+05 4.991794318923266474e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.606369817776421769e+02 -1.820092841812642467e-08 -1.136286590317471534e-02 3.619031336587289621e-02 1.424289986547620096e-02 5.533487362460313193e-04 -4.338583525079367596e-04 -1.890155468253958962e-04 1.000000000000000000e+00 8.311766000000000076e+01 
6.908545403875599732e+03 5.742221279738948215e+05 6.987216509779604166e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.593342616024719973e+02 -3.315296284192901071e-09 -5.857725263174714918e-03 2.357598136587289728e-02 1.897169486547620187e-02 7.518108062460313089e-04 -5.384554125079367383e-04 -1.363035768253958785e-04 1.000000000000000000e+00 8.514491999999999905e+01 7.249657401806400230e+03 6.172714995042138034e+05 9.454055317384982118e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.586453520651357962e+02 -5.897348231645986935e-10 1.111030896825284872e-03 2.246285136587289344e-02 2.219625186547620130e-02 6.593569362460313795e-04 -4.778790125079367536e-04 -7.630101682539586726e-05 1.000000000000000000e+00 8.717217999999999734e+01 7.598988965952399667e+03 6.624204339580164524e+05 1.244230033515567993e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590023951682784400e+02 -1.026285419063397840e-10 1.848365996825284893e-03 1.420209336587289345e-02 2.652135286547620263e-02 9.330586362460312937e-04 -5.569034125079367487e-04 -8.223069682539586433e-05 1.000000000000000000e+00 8.919944000000000983e+01 7.956540096313601680e+03 7.097189209287194535e+05 1.600194115650800268e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602716614134758402e+02 -1.749597348817579870e-11 -4.677688603174715194e-03 1.815530536587289800e-02 5.745579865476198311e-03 6.605902962460313572e-04 -5.903785325079367440e-04 -1.106166468253958835e-04 1.000000000000000000e+00 9.122670000000000812e+01 8.322310792890000812e+03 7.592169500097383279e+05 2.018296737485818085e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604482429819940421e+02 -2.915444713749153634e-12 9.829689682528536254e-05 1.461135536587289396e-02 1.032855886547619922e-02 6.060708362460314087e-04 -5.028199025079367092e-04 9.170133174604125012e-06 1.000000000000000000e+00 9.325396000000000640e+01 8.696301055681600701e+03 8.109645107944898773e+05 2.503536858362251587e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.603872844034092395e+02 -4.662988791519401875e-13 -8.091808403174714781e-03 2.668391636587289645e-02 9.499642865476200237e-03 6.190488562460314068e-04 -5.573827825079367406e-04 -1.419941268253958845e-04 1.000000000000000000e+00 9.528122000000000469e+01 9.078510884688401347e+03 8.650115928763902048e+05 3.060913437621728735e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604142717232071504e+02 -8.815554369269722195e-14 7.379531968252847629e-04 1.966617536587289550e-02 5.218423865476204404e-03 7.821939762460313177e-04 -6.720836925079368140e-04 -1.368856682539584639e-05 1.000000000000000000e+00 9.730848000000000297e+01 9.468940279910400932e+03 9.214081858488556463e+05 3.695425434605876944e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.596725946468405937e+02 -4.790854546882667301e-14 3.729469396825285318e-03 1.677155036587289760e-02 9.729758654761985759e-04 7.744619962460313600e-04 -6.579227325079367063e-04 3.219561317460413550e-05 1.000000000000000000e+00 9.933574000000000126e+01 9.867589241347599454e+03 9.802042793053025380e+05 4.412071808656324720e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.581306961166553151e+02 -1.980567423292065087e-14 1.638672296825285576e-03 -6.475722634127104721e-03 1.390103865476201295e-03 4.816735362460312572e-04 -6.694806825079367436e-04 -9.350514682539593728e-05 1.000000000000000000e+00 1.013629999999999995e+02 1.027445776900000055e+04 1.041449862839146983e+06 5.215851519114699477e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 
2.583217628919668982e+02 -1.405585884156201381e-14 7.728531396825284727e-03 -3.856817634127103489e-03 5.960830865476204887e-03 3.423149362460312529e-04 -7.660289725079367888e-04 2.281447317460411506e-05 1.000000000000000000e+00 1.033902599999999978e+02 1.068954586286759877e+04 1.105194926043805433e+06 6.111763525322629721e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.598821128165189407e+02 -1.691532273721650273e-14 2.477927296825284398e-03 1.116856365872893886e-03 9.179691865476201362e-03 7.097850162460313164e-04 -8.175605915079367601e-04 -5.294306825395908231e-06 1.000000000000000000e+00 1.054175199999999961e+02 1.111285352295039957e+04 1.171489458512694109e+06 7.104806786621743231e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.585404205373588979e+02 5.840602392497974451e-15 -3.963158031747146190e-04 7.451014365872893341e-03 3.865376865476201351e-03 5.380693362460314128e-04 -7.396422825079367394e-04 -2.474268682539594241e-05 1.000000000000000000e+00 1.074447800000000086e+02 1.154438074924840112e+04 1.240383449839229695e+06 8.199980262353675789e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.574534264776349914e+02 1.712000128500727594e-14 -8.327767103174715108e-03 6.492053658728944021e-04 -4.315605134523795017e-03 4.314180362460313858e-04 -5.235343725079368016e-04 -1.426233668253959388e-04 1.000000000000000000e+00 1.094720400000000069e+02 1.198412754176160161e+04 1.311926889616827713e+06 9.402282911860039167e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.587782193488699249e+02 1.886219388254293476e-14 4.761096396825285557e-03 -7.202196341271061009e-04 -2.113392134523800481e-03 4.052769362460314270e-04 -7.262424025079366931e-04 -9.712075682539588351e-05 1.000000000000000000e+00 1.114993000000000052e+02 1.243209390049000103e+04 1.386169767438904848e+06 1.071671369448246987e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.598656445159390387e+02 2.347227720962027251e-14 -4.165797203174715496e-03 1.295209736587289717e-02 -1.783551213452379963e-02 4.884648362460312747e-04 -5.813059725079367619e-04 -7.004130682539588988e-05 1.000000000000000000e+00 1.135265600000000035e+02 1.288827982543360122e+04 1.463162072898877319e+06 1.214827156956259423e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600989598110621728e+02 7.813072328225483567e-15 1.221070796825285756e-03 1.337387336587289588e-02 -1.252786513452380096e-02 2.161711362460314121e-04 -5.074466025079367101e-04 2.142214317460411615e-05 1.000000000000000000e+00 1.155538200000000018e+02 1.335268531659240034e+04 1.542953795590161346e+06 1.370195549644204148e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.608026132195976174e+02 -8.925257391752444914e-15 1.228668539682528649e-02 1.208959736587289502e-02 -2.235864113452379343e-02 1.684635362460312931e-04 -2.464530425079367254e-04 1.124107331746041069e-04 1.000000000000000000e+00 1.175810800000000000e+02 1.382531037396640022e+04 1.625594925106173148e+06 1.538276443446243939e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.599810088414655525e+02 -1.025966746681070654e-14 2.181112039682528425e-02 -1.205161763412710557e-02 -1.086435413452380150e-02 -5.987476375396861422e-05 -3.407551025079368036e-04 1.726038431746041530e-04 1.000000000000000000e+00 1.196083399999999983e+02 1.430615499755559904e+04 1.711135451040329412e+06 1.719569734296541719e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590798220474434288e+02 -1.906044947566650386e-14 1.003784239682528612e-02 6.137143365872895634e-03 3.477642546547619895e-02 
-2.676582637539685807e-04 -2.744146425079367797e-04 7.012074317460411776e-05 1.000000000000000000e+00 1.216356000000000108e+02 1.479521918736000225e+04 1.799625362986046588e+06 1.914575318129261141e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.594494534850605305e+02 -2.578066919736734499e-14 -2.027138103174714809e-03 -6.372505634127105523e-03 2.919624086547620290e-02 -3.534829637539685723e-04 -3.414351725079367138e-04 -9.094636825395874605e-06 1.000000000000000000e+00 1.236628600000000091e+02 1.529250294337960258e+04 1.891114650536739733e+06 2.123793090878562944e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.580147282408462956e+02 -1.888533906750968316e-14 -1.798189060317471888e-02 8.892993658728941264e-04 1.529699586547620185e-02 -1.785335637539686715e-04 -3.668640225079367609e-04 -1.523243868253959478e-04 1.000000000000000000e+00 1.256901200000000074e+02 1.579800626561440185e+04 1.985653303285826230e+06 2.347722948478611070e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.573028837927551535e+02 -1.384973992480027394e-14 -2.226030160317471474e-02 -1.401617563412710550e-02 9.232429865476204922e-03 -2.170017637539685754e-04 -6.020543625079367335e-04 -1.919957668253958593e-04 1.000000000000000000e+00 1.277173800000000057e+02 1.631172915406440188e+04 2.083291310826721834e+06 2.586864786863568006e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.578661686497533765e+02 6.466134453156046314e-15 -2.225478460317471471e-02 -4.346986634127105592e-03 4.281016865476203193e-03 7.098093624603144143e-05 -4.939255525079367390e-04 -1.331850268253959284e-04 1.000000000000000000e+00 1.297446400000000040e+02 1.683367160872960085e+04 2.184078662752842996e+06 0.000000000000000000e+00 5.310280676409380618e+00 -0.000000000000000000e+00 2.591778558577004219e+02 3.881029512404210243e-14 -3.350587260317471061e-02 3.708508365872893731e-03 3.303729865476202898e-03 4.290136246031276898e-06 -4.810798125079367789e-04 -1.990675968253958908e-04 1.000000000000000000e+00 1.317719000000000165e+02 1.736383362961000603e+04 2.288065348657606635e+06 0.000000000000000000e+00 5.366368736595970290e+01 -0.000000000000000000e+00 2.586265557848932417e+02 2.496704000974017557e-14 -4.055766460317471178e-02 2.324536365872896526e-03 -1.314141813452379681e-02 -2.111011637539687423e-04 -4.601720925079367608e-04 -2.897881768253959302e-04 1.000000000000000000e+00 1.337991600000000005e+02 1.790221521670560105e+04 2.395301358134427108e+06 0.000000000000000000e+00 1.950272394768473418e+02 -0.000000000000000000e+00 2.590581804587237684e+02 1.020916719346166513e-14 -3.380893360317471785e-02 4.358221365872893410e-03 -1.662428913452379531e-02 -3.211422637539687076e-04 -4.006317125079367453e-04 -1.464107968253959514e-04 1.000000000000000000e+00 1.358264200000000130e+02 1.844881637001640411e+04 2.505836680776723661e+06 0.000000000000000000e+00 4.793905304253556778e+02 -0.000000000000000000e+00 2.583827214705520987e+02 2.027411219651766781e-14 -3.782695560317471395e-02 1.149229936587289197e-02 -1.630400713452379718e-02 -2.047094637539685711e-04 -2.136010125079367472e-04 -1.059907068253958815e-04 1.000000000000000000e+00 1.378536799999999971e+02 1.900363708954240064e+04 2.619721306177909020e+06 0.000000000000000000e+00 9.567431536277551913e+02 -0.000000000000000000e+00 2.588504729398947006e+02 -2.959667608385212738e-16 -2.737255860317471326e-02 2.306047836587289679e-02 -1.175693013452380059e-02 -1.525203637539687424e-04 -2.631168025079367104e-04 -9.378550682539587170e-05 1.000000000000000000e+00 
1.398809400000000096e+02 1.956667737528360158e+04 2.737005223931403365e+06 0.000000000000000000e+00 1.677074702500338617e+03 -0.000000000000000000e+00 2.575437003556809259e+02 -1.510831198685233849e-14 -2.817193160317471579e-02 -8.620721634127109789e-03 -1.014567713452380060e-02 -2.024390637539686885e-04 2.442606749206328864e-05 -1.798543568253958532e-04 1.000000000000000000e+00 1.419081999999999937e+02 2.013793722723999963e+04 2.857738423630618956e+06 0.000000000000000000e+00 2.690374770459364299e+03 -0.000000000000000000e+00 2.571050917428615890e+02 -1.222166334909334567e-14 -3.965530660317471978e-02 -7.093271634127106678e-03 -2.676973013452380035e-02 -2.402326637539686175e-04 -1.294388825079367941e-04 -2.178491468253959109e-04 1.000000000000000000e+00 1.439354600000000062e+02 2.071741664541160208e+04 2.981970894868975971e+06 0.000000000000000000e+00 4.046632950921140036e+03 -0.000000000000000000e+00 2.578799640847943806e+02 -2.669000963219823434e-14 -4.578354560317471345e-02 -1.935690153412710640e-02 -1.530625134523795616e-03 -3.285852637539686972e-04 -2.997716825079367771e-04 -1.772051168253958690e-04 1.000000000000000000e+00 1.459627199999999903e+02 2.130511562979839800e+04 3.109752627239886671e+06 0.000000000000000000e+00 5.795838837301906096e+03 -0.000000000000000000e+00 2.593543512047501167e+02 -1.557425937872241460e-14 -5.462931060317471887e-02 -1.786486341271049938e-04 -2.675493513452380234e-02 -3.041632637539686251e-04 -2.994083325079367969e-04 -2.266904168253959084e-04 1.000000000000000000e+00 1.479899800000000027e+02 2.190103418040040197e+04 3.241133610336771701e+06 0.000000000000000000e+00 7.987982023017991196e+03 -0.000000000000000000e+00 2.592103613515139955e+02 1.025550116606464834e-14 -5.747345460317471177e-02 -2.301652634127106245e-03 -3.055690313452380513e-02 -1.852517637539686981e-04 -7.782878250793675776e-05 -2.941239768253959370e-04 1.000000000000000000e+00 1.500172400000000152e+02 2.250517229721760305e+04 3.376163833753045183e+06 0.000000000000000000e+00 1.067305210148565311e+04 -0.000000000000000000e+00 2.593191728453962241e+02 7.314866478049465998e-15 -4.823187060317471464e-02 1.890068236587289646e-02 -4.777992713452379470e-02 -3.452388637539688387e-04 -1.024134925079367604e-04 -3.109670268253958468e-04 1.000000000000000000e+00 1.520444999999999993e+02 2.311752998025000124e+04 3.514893287082121242e+06 0.000000000000000000e+00 1.390103866612112324e+04 -0.000000000000000000e+00 2.592008769899240974e+02 2.221644269997001270e-14 -4.118386960317471646e-02 1.733267436587289378e-02 -4.355931913452379400e-02 -3.705732637539686618e-04 -2.771284925079367436e-04 -1.953945868253958865e-04 1.000000000000000000e+00 1.540717600000000118e+02 2.373810722949760384e+04 3.657371959917420056e+06 0.000000000000000000e+00 1.772193131034077305e+04 -0.000000000000000000e+00 2.594888566963954304e+02 3.423980053733376596e-14 -3.876614060317470911e-02 1.016017036587289757e-02 -5.628503713452380486e-02 -3.304482637539686487e-04 -1.241367425079367053e-04 -9.316598682539590105e-05 1.000000000000000000e+00 1.560990199999999959e+02 2.436690404496039991e+04 3.803649841852353886e+06 0.000000000000000000e+00 2.218571962756076755e+04 -0.000000000000000000e+00 2.592471779187787320e+02 2.090355192478126067e+00 -4.206244260317471007e-02 1.105673136587289468e-02 -4.754148013452379196e-02 -2.150553637539685901e-04 -3.158812625079367815e-04 -1.838400068253959359e-04 1.000000000000000000e+00 1.581262800000000084e+02 2.500392042663840402e+04 3.953776922480343841e+06 0.000000000000000000e+00 
2.734239321119751912e+04 -0.000000000000000000e+00 2.595144644726888146e+02 1.390900631135700216e+01 -4.419308660317471105e-02 2.374663636587289600e-02 -5.757486113452379983e-02 -3.322781637539685886e-04 3.979992749206327091e-05 -1.636741968253958715e-04 1.000000000000000000e+00 1.601535399999999925e+02 2.564915637453159798e+04 4.107803191394800786e+06 0.000000000000000000e+00 3.324194165466715640e+04 -0.000000000000000000e+00 2.594934264342520578e+02 1.557696618507103814e+01 -4.037264960317471507e-02 1.567967136587289367e-02 -6.731542113452379517e-02 -3.889040637539684965e-04 6.342409749206321789e-05 -1.471519668253958805e-04 1.000000000000000000e+00 1.621808000000000050e+02 2.630261188863999996e+04 4.265778638189146295e+06 0.000000000000000000e+00 3.993435455138613179e+04 -0.000000000000000000e+00 2.599376394425571561e+02 6.004799507075502696e+00 -2.857072660317471618e-02 1.227729936587289294e-02 -4.839276813452379755e-02 -4.437891637539687073e-04 9.347311749206322923e-05 -8.371388682539590391e-05 1.000000000000000000e+00 1.642080600000000175e+02 2.696428696896360634e+04 4.427753252456794493e+06 0.000000000000000000e+00 4.746962149477063213e+04 -0.000000000000000000e+00 2.583130545235978275e+02 -1.994345614804481137e+00 -3.650895860317471264e-02 6.498904365872894273e-03 -2.158240113452379594e-02 -4.707137637539686968e-04 5.781790749206320440e-05 -1.285526168253958669e-04 1.000000000000000000e+00 1.662353200000000015e+02 2.763418161550239893e+04 4.593777023791158572e+06 0.000000000000000000e+00 5.589773207823683333e+04 -0.000000000000000000e+00 2.576154366178921009e+02 -4.354781600224979066e+00 -3.754501060317471522e-02 -1.127231463412710355e-02 -2.067503813452380157e-02 -4.761822637539686598e-04 1.106139774920631693e-04 -2.297192168253959360e-04 1.000000000000000000e+00 1.682625800000000140e+02 2.831229582825640318e+04 4.763899941785659641e+06 0.000000000000000000e+00 6.526867589520123147e+04 -0.000000000000000000e+00 2.582447671201464914e+02 -3.421137938612250018e+00 -3.709204260317471025e-02 -2.815319033412710253e-02 -3.209472813452379780e-02 -4.201502637539685295e-04 1.587400974920632384e-04 -1.486038868253958603e-04 1.000000000000000000e+00 1.702898399999999981e+02 2.899862960722560092e+04 4.938171996033710428e+06 0.000000000000000000e+00 7.563244253907985694e+04 -0.000000000000000000e+00 2.578366809073544346e+02 -1.884871573445409121e+00 -4.559719660317471113e-02 -2.012774773412710425e-02 -4.258769413452380415e-02 -5.238649637539687445e-04 1.121453374920631770e-04 -3.780857468253959143e-04 1.000000000000000000e+00 1.723171000000000106e+02 2.969318295241000305e+04 5.116643176128730178e+06 0.000000000000000000e+00 8.703902160328927857e+04 -0.000000000000000000e+00 2.575530122743335824e+02 -8.333182129281194728e-01 -5.434145660317471482e-02 -3.934316634127105194e-03 -2.802218613452379936e-02 -6.544949637539688135e-04 -4.547183250793677273e-05 -4.325602468253959159e-04 1.000000000000000000e+00 1.743443599999999947e+02 3.039595586380959867e+04 5.299363471664131619e+06 0.000000000000000000e+00 9.953840268124543945e+04 -0.000000000000000000e+00 2.582481297574381642e+02 -3.138703752643597356e-01 -7.287733960317471782e-02 -5.080906634127104610e-03 -3.453186913452380158e-02 -4.803973637539688153e-04 1.780781974920633099e-04 -5.289674068253958326e-04 1.000000000000000000e+00 1.763716200000000072e+02 3.110694834142440232e+04 5.486382872233334929e+06 0.000000000000000000e+00 1.131805753663649812e+05 -0.000000000000000000e+00 2.585080012650139452e+02 -1.043136344467513188e-01 
-6.379200960317471525e-02 -1.374258063412710576e-02 -2.450723213452379867e-02 -4.271225637539686863e-04 1.437427974920632524e-04 -5.507417468253958956e-04 1.000000000000000000e+00 1.783988800000000197e+02 3.182616038525440672e+04 5.677751367429755628e+06 0.000000000000000000e+00 1.280155292520640214e+05 -0.000000000000000000e+00 2.589810122439655515e+02 -3.132084140102636693e-02 -7.903580860317471757e-02 -1.139652463412710488e-02 -3.978782313452379482e-02 -7.604801637539687284e-04 7.115520749206329099e-05 -5.629854968253959323e-04 1.000000000000000000e+00 1.804261400000000037e+02 3.255359199529960097e+04 5.873518946846805513e+06 0.000000000000000000e+00 1.440932539317586052e+05 -0.000000000000000000e+00 2.592973588137387537e+02 -8.642004611292256402e-03 -7.117489360317472147e-02 6.563063658728933436e-04 -1.220494713452379559e-02 -1.040174863753968570e-03 1.255475674920631573e-04 -4.912412868253959080e-04 1.000000000000000000e+00 1.824534000000000162e+02 3.328924317156000325e+04 6.073735600077906623e+06 0.000000000000000000e+00 1.614637389988654468e+05 -0.000000000000000000e+00 2.597658000548336190e+02 -2.219631667718219379e-03 -7.383044660317471253e-02 1.410036136587289324e-02 7.094414865476204868e-03 -1.154148363753968592e-03 1.615656974920631457e-04 -4.779029468253959113e-04 1.000000000000000000e+00 1.844806600000000003e+02 3.403311391403560265e+04 6.278451316716470756e+06 0.000000000000000000e+00 1.801769740468003438e+05 -0.000000000000000000e+00 2.600563664053747743e+02 -5.360794304548768905e-04 -6.586775660317471803e-02 1.655973336587289457e-02 -3.370324134523795812e-03 -1.024209463753968704e-03 2.467476974920631986e-04 -3.545917268253958969e-04 1.000000000000000000e+00 1.865079200000000128e+02 3.478520422272640280e+04 6.487716086355919018e+06 0.000000000000000000e+00 2.002829486689801270e+05 -0.000000000000000000e+00 2.602901128078428314e+02 -1.227230843113079188e-04 -7.049731160317471157e-02 2.987536736587289438e-02 -2.844490013452380395e-02 -1.023074663753968574e-03 3.075099974920633130e-04 -4.335420468253959139e-04 1.000000000000000000e+00 1.885351799999999969e+02 3.554551409763239644e+04 6.701579898589661345e+06 0.000000000000000000e+00 2.218316524588204629e+05 -0.000000000000000000e+00 2.598264999690118202e+02 -2.680547757517946417e-05 -6.999111960317472292e-02 3.268389136587289412e-02 -1.014415313452379785e-02 -1.264280463753968825e-03 3.754828974920633141e-04 -4.390807868253959116e-04 1.000000000000000000e+00 1.905624400000000094e+02 3.631404353875360539e+04 6.920092743011121638e+06 0.000000000000000000e+00 2.448730750097382988e+05 -0.000000000000000000e+00 2.593596969358279694e+02 -5.616675076130314912e-06 -7.061410360317471602e-02 1.946565236587289444e-02 1.298353186547620067e-02 -1.411778063753968502e-03 3.464828974920631513e-04 -4.315975268253958975e-04 1.000000000000000000e+00 1.925896999999999935e+02 3.709079254609000054e+04 7.143304609213708900e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.050658692729931676e-01 2.590111897581169842e+02 -1.134042666258773269e-06 -6.430232360317471307e-02 9.387877365872897284e-03 1.989402986547620170e-02 -1.206621563753968590e-03 3.619273974920631963e-04 -2.785400868253959123e-04 1.000000000000000000e+00 1.946169600000000059e+02 3.787576111964160373e+04 7.371265486790845171e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.792219728288165825e+01 2.592187248340080146e+02 -2.214888922805985211e-07 -6.659902160317471287e-02 2.656261365872894520e-03 3.570156865476202535e-03 -1.165767363753968631e-03 2.659288974920633259e-04 
-4.230125468253958989e-04 1.000000000000000000e+00 1.966442200000000184e+02 3.866894925940840767e+04 7.604025365335945040e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.001701567040806395e+02 2.568569463550700789e+02 -4.198101968002987330e-08 -7.323089060317471144e-02 -1.627500873412710686e-02 -1.577161013452380023e-02 -1.102308463753968581e-03 3.161176974920632205e-04 -5.168182368253959342e-04 1.000000000000000000e+00 1.986714800000000025e+02 3.947035696539039782e+04 7.841634234442420304e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.969385375491461332e+02 2.567240790713361207e+02 -7.743329643075380204e-09 -6.897513560317471148e-02 -1.611915993412710302e-02 -2.616736134523796331e-03 -1.200253663753968539e-03 1.929508974920633073e-04 -5.036689468253959488e-04 1.000000000000000000e+00 2.006987400000000150e+02 4.027998423758760327e+04 8.084142083703693934e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.582169332343630686e+02 2.586193994029661667e+02 -1.393367178829911838e-09 -5.634666760317472156e-02 -1.460659013412710441e-02 -7.355770134523799408e-03 -1.106632863753968712e-03 2.397183974920633369e-04 -2.815068668253959110e-04 1.000000000000000000e+00 2.027259999999999991e+02 4.109783107600000221e+04 8.331598902713175863e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.233994937175999667e+03 2.583841010140561707e+02 -2.451015754665610884e-10 -5.365690960317472114e-02 -3.176442634127106535e-03 -1.357681813452379926e-02 -1.359759063753968565e-03 3.363015974920633170e-04 -3.537066968253959058e-04 1.000000000000000000e+00 2.047532600000000116e+02 4.192389748062760191e+04 8.584054681064289063e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.074262142790351845e+03 2.580570629821268653e+02 -4.223306201495954454e-11 -6.084147260317471217e-02 -3.061121634127104973e-03 -7.098403134523798008e-03 -1.362605363753968592e-03 3.012735974920632180e-04 -3.800203668253959157e-04 1.000000000000000000e+00 2.067805199999999957e+02 4.275818345147039508e+04 8.841559408350443467e+06 0.000000000000000000e+00 0.000000000000000000e+00 3.229008143493673742e+03 2.579470443876787158e+02 -7.156816105084053148e-12 -5.542562460317471129e-02 -7.138105634127103749e-03 -8.081440134523797114e-03 -1.391156263753968482e-03 4.135093974920632997e-04 -3.021409368253959084e-04 1.000000000000000000e+00 2.088077800000000082e+02 4.360068898852840357e+04 9.104163074165061116e+06 0.000000000000000000e+00 0.000000000000000000e+00 4.748222532702277931e+03 2.599391914289993224e+02 -1.207054311834787104e-12 -3.929887060317471814e-02 -1.462635634127105316e-03 8.798824865476201351e-03 -1.426038263753968451e-03 4.433595974920632532e-04 -2.323506468253958533e-04 1.000000000000000000e+00 2.108350399999999922e+02 4.445141409180159826e+04 9.371915668101552874e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.681894903832399905e+03 2.598000299780277942e+02 -2.147932142301532625e-13 -3.044666260317471437e-02 3.492261365872894457e-03 1.358364186547620159e-02 -1.401727663753968636e-03 6.015499974920632828e-04 -3.316270368253958971e-04 1.000000000000000000e+00 2.128623000000000047e+02 4.531035876129000098e+04 9.644867179753340781e+06 0.000000000000000000e+00 0.000000000000000000e+00 9.080014850300372927e+03 2.586574230707965967e+02 -4.005466736646009179e-14 -2.476531660317471406e-02 -5.519612634127105122e-03 -4.220371134523795420e-03 -1.356848963753968466e-03 6.767285974920632342e-04 -1.464826568253959098e-04 1.000000000000000000e+00 2.148895600000000172e+02 4.617752299699360447e+04 9.923067598713837564e+06 
0.000000000000000000e+00 0.000000000000000000e+00 1.199257196552245478e+04 2.580585287471001266e+02 5.576182320169238469e-15 -2.481630060317471451e-02 -2.066747903412710641e-02 -8.855922134523797062e-03 -1.513199763753968717e-03 7.608736974920631479e-04 -2.132055668253959218e-04 1.000000000000000000e+00 2.169168200000000013e+02 4.705290679891240143e+04 1.020656691457645781e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.546955584291486957e+04 2.596834585519351322e+02 2.568277743391322110e-14 -1.720004460317471617e-02 -3.208105634127104283e-03 6.712917865476203394e-03 -1.568501263753968675e-03 8.467745974920633202e-04 -4.144829682539588698e-05 1.000000000000000000e+00 2.189440800000000138e+02 4.793651016704640642e+04 1.049541511693462171e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.956095607589399515e+04 2.602373452688122484e+02 2.864205519224055026e-14 -6.253708943174715595e-03 -9.945376634127107290e-03 4.145976865476200257e-03 -1.333169263753968686e-03 9.531898974920633176e-04 3.677603317460409731e-05 1.000000000000000000e+00 2.209713399999999979e+02 4.882833310139559762e+04 1.078966219538174197e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.431676225787599833e+04 2.597039792615581746e+02 4.243141342386595795e-14 1.595174939682528562e-02 -1.055324663412710549e-02 -1.035979134523801193e-03 -1.308523563753968659e-03 9.431188974920631492e-04 1.944633531746040077e-04 1.000000000000000000e+00 2.229986000000000104e+02 4.972837560196000413e+04 1.108935813951123878e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.978696398227728423e+04 2.587581297465923740e+02 5.218249358888024564e-14 1.167652339682528559e-02 -3.154518813412710704e-02 -1.016064713452379670e-02 -1.509701063753968696e-03 1.229371797492063208e-03 3.136779317460414856e-05 1.000000000000000000e+00 2.250258599999999944e+02 5.063663766873959685e+04 1.139455293891652301e+07 0.000000000000000000e+00 0.000000000000000000e+00 3.602155084251398512e+04 2.603926301344870922e+02 3.008952168432361289e-14 5.463445239682528098e-02 1.774224736587289714e-02 -2.389310713452379165e-02 -1.121983163753968726e-03 1.694905097492063056e-03 6.866294531746040862e-04 1.000000000000000000e+00 2.270531200000000069e+02 5.155311930173440487e+04 1.170529658319101855e+07 0.000000000000000000e+00 0.000000000000000000e+00 4.307051243200255703e+04 2.602516580326667963e+02 -6.752827934503707979e-15 4.810599839682529189e-02 -1.496451783412710429e-02 -1.839609713452379502e-02 -1.091134563753968535e-03 2.077571597492063049e-03 6.492220531746040147e-04 1.000000000000000000e+00 2.290803800000000194e+02 5.247782050094440638e+04 1.202163906192813627e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.098383834415919409e+04 2.592875295662722124e+02 -5.123726758011432889e-14 5.266199139682528618e-02 -3.588364363412710478e-02 -2.446927813452379197e-02 -1.311484563753968768e-03 2.101589897492062934e-03 4.946715531746040178e-04 1.000000000000000000e+00 2.311076400000000035e+02 5.341074126636960136e+04 1.234363036472129077e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.981151817240004311e+04 2.592308820611373790e+02 -4.564269168653986185e-14 6.118641339682527602e-02 -3.072892273412710512e-02 -5.481309213452380258e-02 -1.255843163753968695e-03 2.399166697492063124e-03 8.114795531746040380e-04 1.000000000000000000e+00 2.331349000000000160e+02 5.435188159801000438e+04 1.267132048116390407e+07 0.000000000000000000e+00 0.000000000000000000e+00 6.960354151014162926e+04 2.595619725021097111e+02 -7.003595564454634878e-15 
7.061374139682527473e-02 -2.235936743412710384e-02 -9.913570134523798372e-03 -1.268399263753968668e-03 2.261032397492063195e-03 8.009784531746040040e-04 1.000000000000000000e+00 2.351621600000000001e+02 5.530124149586560088e+04 1.300475940084938519e+07 0.000000000000000000e+00 0.000000000000000000e+00 8.040989795079996111e+04 2.590598186666346123e+02 6.860929725324630356e-16 7.201945939682527498e-02 -3.425773313412710380e-02 -2.546723413452380014e-02 -1.075733563753968579e-03 2.168366397492063266e-03 7.489506531746041021e-04 1.000000000000000000e+00 2.371894200000000126e+02 5.625882095993640542e+04 1.334399711337115988e+07 0.000000000000000000e+00 0.000000000000000000e+00 9.228057708779163659e+04 2.587420925533573950e+02 1.294219066619279127e-14 7.647773339682528704e-02 -3.709144563412710566e-02 -2.783978113452380276e-02 -1.551370363753968619e-03 2.241157097492063165e-03 8.262895531746041215e-04 1.000000000000000000e+00 2.392166799999999967e+02 5.722461999022239615e+04 1.368908360832263529e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.052655685145325697e+05 2.593739234782133281e+02 2.489479921874677366e-14 8.438259339682528670e-02 -4.554863663412710151e-02 -2.931516913452379691e-02 -1.568884063753968675e-03 2.324322897492063066e-03 1.015189053174604152e-03 1.000000000000000000e+00 2.412439400000000091e+02 5.819863858672360220e+04 1.404006887529723532e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.194148618244394165e+05 2.595694737699126904e+02 1.555171091878242343e-14 1.248369423968252873e-01 -2.113519634127106195e-03 -3.012072513452379585e-02 -1.548244163753968554e-03 1.854907397492062959e-03 1.469042453174604010e-03 1.000000000000000000e+00 2.432712000000000216e+02 5.918087674944000901e+04 1.439700290388837270e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.347784466109283094e+05 2.589335042145449961e+02 2.528064545633945617e-14 1.386098423968253057e-01 -1.554300763412710430e-02 -4.327043134523797518e-03 -1.380193763753968534e-03 1.668424897492063255e-03 1.876337353174604202e-03 1.000000000000000000e+00 2.452984600000000057e+02 6.017133447837160202e+04 1.475993568368945830e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.514063124674152350e+05 2.585203309350814038e+02 1.708358532944420453e-14 1.239822423968252735e-01 -5.095475363412710346e-02 2.847714586547619997e-02 -1.985254663753968554e-03 1.673778397492063034e-03 1.607149453174604170e-03 1.000000000000000000e+00 2.473257200000000182e+02 6.117001177351841034e+04 1.512891720429391786e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.693484489873169805e+05 2.577332151445505701e+02 -1.306066815792015049e-14 1.361771723968253078e-01 -4.787148863412710176e-02 4.220229221547620174e-02 -1.763331063753968610e-03 1.475171397492063084e-03 1.748681753174604041e-03 1.000000000000000000e+00 2.493529800000000023e+02 6.217690863488039759e+04 1.550399745529516041e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.886548457640492998e+05 2.560893166165346315e+02 -2.617243662451785409e-14 1.327212623968253014e-01 -5.556048863412710315e-02 3.156961486547620044e-02 -1.981807663753968815e-03 1.528626197492063011e-03 1.673368953174604080e-03 1.000000000000000000e+00 2.513802400000000148e+02 6.319202506245760742e+04 1.588522642628660984e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.093754923910290236e+05 2.583923782750806595e+02 -3.599677893875522221e-14 1.378797223968253050e-01 -4.097437163412710748e-02 4.345609296547620071e-02 -1.820374863753968561e-03 1.497760797492063162e-03 1.892659753174604376e-03 
1.000000000000000000e+00 2.534074999999999989e+02 6.421536105624999618e+04 1.627265410686167143e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.315603784616718476e+05
'''

nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_regression.py

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test functions for models.regression
"""

import numpy as np
import scipy.linalg as spl
from numpy.testing import assert_array_almost_equal

from ..regression import (
    AREstimator,
    ARModel,
    OLSModel,
    ar_bias_correct,
    ar_bias_corrector,
    yule_walker,
)

RNG = np.random.RandomState(20110902)
X = RNG.standard_normal((40,10))
Y = RNG.standard_normal((40,))


def test_OLS():
    model = OLSModel(design=X)
    results = model.fit(Y)
    assert results.df_resid == 30


def test_AR():
    model = ARModel(design=X, rho=0.4)
    results = model.fit(Y)
    assert results.df_resid == 30


def test_OLS_degenerate():
    Xd = X.copy()
    Xd[:,0] = Xd[:,1] + Xd[:,2]
    model = OLSModel(design=Xd)
    results = model.fit(Y)
    assert results.df_resid == 31


def test_AR_degenerate():
    Xd = X.copy()
    Xd[:,0] = Xd[:,1] + Xd[:,2]
    model = ARModel(design=Xd, rho=0.9)
    results = model.fit(Y)
    assert results.df_resid == 31


def test_yule_walker_R():
    # Test YW implementation against R results
    Y = np.array([1,3,4,5,8,9,10])
    N = len(Y)
    X = np.ones((N, 2))
    X[:,0] = np.arange(1,8)
    pX = spl.pinv(X)
    betas = np.dot(pX, Y)
    Yhat = Y - np.dot(X, betas)
    # R results obtained from:
    # >>> np.savetxt('yhat.csv', Yhat)
    # > yhat = read.table('yhat.csv')
    # > ar.yw(yhat$V1, aic=FALSE, order.max=2)
    def r_fudge(sigma, order):
        # Reverse fudge in ar.R calculation labeled as splus compatibility fix
        return sigma **2 * N / (N-order-1)
    rhos, sd = yule_walker(Yhat, 1, 'mle')
    assert_array_almost_equal(rhos, [-0.3004], 4)
    assert_array_almost_equal(r_fudge(sd, 1), 0.2534, 4)
    rhos, sd = yule_walker(Yhat, 2, 'mle')
    assert_array_almost_equal(rhos, [-0.5113, -0.7021], 4)
    assert_array_almost_equal(r_fudge(sd, 2), 0.1606, 4)
    rhos, sd = yule_walker(Yhat, 3, 'mle')
    assert_array_almost_equal(rhos, [-0.6737, -0.8204, -0.2313], 4)
    assert_array_almost_equal(r_fudge(sd, 3), 0.2027, 4)


def test_ar_estimator():
    # More or less a smoke test
    rng = np.random.RandomState(20110903)
    N = 100
    Y = rng.normal(size=(N,1)) * 10 + 100
    X = np.c_[np.linspace(-1,1,N), np.ones((N,))]
    my_model = OLSModel(X)
    results = my_model.fit(Y)
    are = AREstimator(my_model,2)
    rhos = are(results)
    assert rhos.shape == (2,)
    assert np.all(np.abs(rhos <= 1))
    rhos2 = ar_bias_correct(results, 2)
    assert_array_almost_equal(rhos, rhos2, 8)
    invM = ar_bias_corrector(my_model.design, my_model.calc_beta, 2)
    rhos3 = ar_bias_correct(results, 2, invM)
    assert_array_almost_equal(rhos2, rhos3)
    # Check orders 1 and 3
    rhos = ar_bias_correct(results, 1)
    assert rhos.shape == ()
    assert abs(rhos) <= 1
    rhos = ar_bias_correct(results, 3)
    assert rhos.shape == (3,)
    assert np.all(np.abs(rhos) <= 1)
    # Make a 2D Y and try that
    Y = rng.normal(size=(N,12)) * 10 + 100
    results = my_model.fit(Y)
    rhos = are(results)
    assert rhos.shape == (2,12)
    assert np.all(np.abs(rhos <= 1))
    rhos2 = ar_bias_correct(results, 2)
    assert_array_almost_equal(rhos, rhos2, 8)
    rhos3 = ar_bias_correct(results, 2, invM)
    assert_array_almost_equal(rhos2, rhos3)
    # Passing in a simple array
    rhos4 = ar_bias_correct(results.resid, 2, invM)
    assert_array_almost_equal(rhos3, rhos4)
    # Check orders 1 and 3
    rhos = ar_bias_correct(results, 1)
    assert rhos.shape == (12,)
    assert np.all(np.abs(rhos) <= 1)
    rhos = ar_bias_correct(results, 3)
    assert rhos.shape == (3,12)
    assert np.all(np.abs(rhos) <= 1)
    # Try reshaping to 3D
    results.resid = results.resid.reshape((N,3,4))
    rhos = ar_bias_correct(results, 2)
    assert_array_almost_equal(rhos, rhos2.reshape((2,3,4)))
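# A compact usage sketch of the model API exercised by the tests above.
# This is an added illustration, not part of the nipy source: the data,
# variable names and printed values below are synthetic assumptions.
import numpy as np

from nipy.algorithms.statistics.models.regression import ARModel, OLSModel

rng = np.random.RandomState(0)
design = np.c_[np.linspace(-1, 1, 50), np.ones(50)]  # slope + intercept
data = design @ np.array([2.0, 10.0]) + rng.normal(size=50)

ols_results = OLSModel(design).fit(data)
print(ols_results.df_resid)  # 48 == 50 observations - 2 regressors

# An AR(1) model with known rho whitens the data before the same fit:
ar_results = ARModel(design, rho=0.3).fit(data)
print(ar_results.df_resid)   # also 48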
nipy-0.6.1/nipy/algorithms/statistics/models/tests/test_utils.py

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test functions for models.utils
"""

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal

from .. import utils


def test_StepFunction():
    x = np.arange(20)
    y = np.arange(20)
    f = utils.StepFunction(x, y)
    assert_array_almost_equal(f(np.array([[3.2, 4.5], [24, -3.1]])),
                              [[3, 4], [19, 0]])


def test_StepFunctionBadShape():
    x = np.arange(20)
    y = np.arange(21)
    pytest.raises(ValueError, utils.StepFunction, x, y)
    x = np.zeros((2, 2))
    y = np.zeros((2, 2))
    pytest.raises(ValueError, utils.StepFunction, x, y)

nipy-0.6.1/nipy/algorithms/statistics/models/utils.py

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' General matrix and other utilities for statistics '''

__docformat__ = 'restructuredtext'

import numpy as np
import scipy.interpolate


def mad(a, c=0.6745, axis=0):
    """ Median Absolute Deviation:

    median(abs(a - median(a))) / c
    """
    _shape = a.shape
    a.shape = np.prod(a.shape, axis=0)
    m = np.median(np.fabs(a - np.median(a))) / c
    a.shape = _shape
    return m


class StepFunction:
    """ A basic step function

    Values at the ends are handled in the simplest way possible: everything
    to the left of ``x[0]`` is set to `ival`; everything to the right of
    ``x[-1]`` is set to ``y[-1]``.

    Examples
    --------
    >>> x = np.arange(20)
    >>> y = np.arange(20)
    >>> f = StepFunction(x, y)
    >>>
    >>> f(3.2)
    3.0
    >>> res = f([[3.2, 4.5],[24, -3.1]])
    >>> np.all(res == [[ 3, 4],
    ...                [19, 0]])
    True
    """
    def __init__(self, x, y, ival=0., sorted=False):
        _x = np.asarray(x)
        _y = np.asarray(y)
        if _x.shape != _y.shape:
            raise ValueError(
                'in StepFunction: x and y do not have the same shape')
        if len(_x.shape) != 1:
            raise ValueError('in StepFunction: x and y must be 1-dimensional')
        self.x = np.hstack([[- np.inf], _x])
        self.y = np.hstack([[ival], _y])
        if not sorted:
            asort = np.argsort(self.x)
            self.x = np.take(self.x, asort, 0)
            self.y = np.take(self.y, asort, 0)
        self.n = self.x.shape[0]

    def __call__(self, time):
        tind = np.searchsorted(self.x, time) - 1
        return self.y[tind]


def ECDF(values):
    """ Return the ECDF of an array as a step function.
    """
    x = np.array(values, copy=True)
    x.sort()
    x.shape = np.prod(x.shape, axis=0)
    n = x.shape[0]
    y = (np.arange(n) + 1.) / n
    return StepFunction(x, y)


def monotone_fn_inverter(fn, x, vectorized=True, **keywords):
    """ Given a monotone function `fn` (no checking is done to verify
    monotonicity) and a set of x values, return a linearly interpolated
    approximation to its inverse from its values on x.
    """
    if vectorized:
        y = fn(x, **keywords)
    else:
        y = []
        for _x in x:
            y.append(fn(_x, **keywords))
        y = np.array(y)
    a = np.argsort(y)
    return scipy.interpolate.interp1d(y[a], x[a])
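# A minimal usage sketch for ``monotone_fn_inverter`` above, which ships no
# doctest of its own.  This is an added illustration, not part of the nipy
# source; the grid and tolerance are assumptions about interpolation accuracy.
import numpy as np

from nipy.algorithms.statistics.models.utils import monotone_fn_inverter

x = np.linspace(0.1, 10, 100)
inv_log = monotone_fn_inverter(np.log, x)  # interpolated inverse of np.log
# The inverse recovers the abscissa to interpolation accuracy:
assert np.allclose(inv_log(np.log(5.0)), 5.0, atol=1e-3)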
""" if vectorized: y = fn(x, **keywords) else: y = [] for _x in x: y.append(fn(_x, **keywords)) y = np.array(y) a = np.argsort(y) return scipy.interpolate.interp1d(y[a], x[a]) nipy-0.6.1/nipy/algorithms/statistics/onesample.py000066400000000000000000000101261470056100100223460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities for one sample t-tests """ __docformat__ = 'restructuredtext' import numpy as np from ..utils.matrices import pos_recipr def estimate_mean(Y, sd): """ Estimate the mean of a sample given information about the standard deviations of each entry. Parameters ---------- Y : ndarray Data for which mean is to be estimated. Should have shape[0] == number of subjects. sd : ndarray Standard deviation (subject specific) of the data for which the mean is to be estimated. Should have shape[0] == number of subjects. Returns ------- value : dict This dictionary has keys ['effect', 'scale', 't', 'resid', 'sd'] """ nsubject = Y.shape[0] squeeze = False if Y.ndim == 1: Y = Y.reshape(Y.shape[0], 1) squeeze = True _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x) W = pos_recipr(sd**2) if W.shape in [(), (1,)]: W = np.ones(Y.shape) * W W.shape = Y.shape # Compute the mean using the optimal weights effect = (Y * W).sum(0) / W.sum(0) resid = (Y - _stretch(effect)) * np.sqrt(W) scale = np.add.reduce(np.power(resid, 2), 0) / (nsubject - 1) var_total = scale * pos_recipr(W.sum(0)) value = {} value['resid'] = resid value['effect'] = effect value['sd'] = np.sqrt(var_total) value['t'] = value['effect'] * pos_recipr(value['sd']) value['scale'] = np.sqrt(scale) if squeeze: for key, val in value.items(): value[key] = np.squeeze(val) return value def estimate_varatio(Y, sd, df=None, niter=10): """ Estimate variance fixed/random effects variance ratio In a one-sample random effects problem, estimate the ratio between the fixed effects variance and the random effects variance. Parameters ---------- Y : np.ndarray Data for which mean is to be estimated. Should have shape[0] == number of subjects. sd : array Standard deviation (subject specific) of the data for which the mean is to be estimated. Should have shape[0] == number of subjects. df : int or None, optional If supplied, these are used as weights when deriving the fixed effects variance. Should have length == number of subjects. niter : int, optional Number of EM iterations to perform (default 10) Returns ------- value : dict This dictionary has keys ['fixed', 'ratio', 'random'], where 'fixed' is the fixed effects variance implied by the input parameter 'sd'; 'random' is the random effects variance and 'ratio' is the estimated ratio of variances: 'random'/'fixed'. """ nsubject = Y.shape[0] squeeze = False if Y.ndim == 1: Y = Y.reshape(Y.shape[0], 1) squeeze = True _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x) W = pos_recipr(sd**2) if W.shape in [(), (1,)]: W = np.ones(Y.shape) * W W.shape = Y.shape S = 1. 
nipy-0.6.1/nipy/algorithms/statistics/quantile.c

#include "quantile.h"

#include <math.h>
#include <stdio.h>

#ifdef INFINITY
#define POSINF INFINITY
#else
#define POSINF HUGE_VAL
#endif

#define UNSIGNED_FLOOR(a) ( (int)(a) )
#define UNSIGNED_CEIL(a) ( ( (int)(a)-a )!=0.0 ? (int)(a+1) : (int)(a) )
#define SWAP(a, b) {tmp=(a); (a)=(b); (b)=tmp;}

/* Declaration of static functions */
static double _pth_element(double* x, npy_intp p, npy_intp stride,
                           npy_intp size);
static void _pth_interval(double* am, double* aM, double* x,
                          npy_intp p, npy_intp stride, npy_intp size);

/*
  Quantile.

  Given a sample x, this function computes a value q so that the number of
  sample values that are greater or equal to q is smaller or equal to
  (1-r) * sample size.
*/
double quantile(double* data, npy_intp size, npy_intp stride,
                double r, int interp)
{
  double m, pp;
  npy_intp p;

  if ((r<0) || (r>1)){
    fprintf(stderr, "Ratio must be in [0,1], returning zero");
    return 0.0;
  }

  if (size == 1)
    return data[0];

  /* Find the smallest index p so that p >= r * size */
  if (!interp) {
    pp = r * size;
    p = UNSIGNED_CEIL(pp);
    if (p == size)
      return POSINF;
    m = _pth_element(data, p, stride, size);
  }
  else {
    double wm, wM;
    pp = r * (size-1);
    p = UNSIGNED_FLOOR(pp);
    wM = pp - (double)p;
    wm = 1.0 - wM;
    if (wM <= 0)
      m = _pth_element(data, p, stride, size);
    else {
      double am, aM;
      _pth_interval(&am, &aM, data, p, stride, size);
      m = wm*am + wM*aM;
    }
  }

  return m;
}

/*** STATIC FUNCTIONS ***/
/* BEWARE: the input array x gets modified! */

/*
  Pick up the sample value a so that:
  (p+1) sample values are <= a AND the remaining sample values are >= a
*/
static double _pth_element(double* x, npy_intp p, npy_intp stride, npy_intp n)
{
  double a, tmp;
  double *bufl, *bufr;
  npy_intp i, j, il, jr, stop1, stop2;
  int same_extremities;

  stop1 = 0;
  il = 0;
  jr = n-1;
  while (stop1 == 0) {

    same_extremities = 0;
    bufl = x + stride*il;
    bufr = x + stride*jr;
    if (*bufl > *bufr)
      SWAP(*bufl, *bufr)
    else if (*bufl == *bufr)
      same_extremities = 1;
    a = *bufl;

    if (il == jr)
      return a;

    bufl += stride;
    i = il + 1;
    j = jr;

    stop2 = 0;
    while (stop2 == 0) {

      while (*bufl < a) {
        i ++;
        bufl += stride;
      }
      while (*bufr > a) {
        j --;
        bufr -= stride;
      }
      if (j <= i)
        stop2 = 1;
      else {
        SWAP(*bufl, *bufr)
        j --;
        bufr -= stride;
        i ++;
        bufl += stride;
      }

      /* Avoids infinite loops in samples with redundant values.
         This situation can only occur with i == j */
      if ((same_extremities) && (j==jr)) {
        j --;
        bufr -= stride;
        SWAP(x[il*stride], *bufr)
        stop2 = 1;
      }
    }

    /* At this point, we know that il <= j <= i; moreover:
         if k <= j, x(j) <= a and if k > j, x(j) >= a
         if k < i, x(i) <= a and if k >= i, x(i) >= a

       We hence have: (j+1) values <= a and the remaining (n-j-1) >= a
                      i values <= a and the remaining (n-i) >= a */
    if (j > p)
      jr = j;
    else if (j < p)
      il = i;
    else /* j == p */
      stop1 = 1;
  }

  return a;
}

/* BEWARE: the input array x gets modified! */
static void _pth_interval(double* am, double* aM, double* x,
                          npy_intp p, npy_intp stride, npy_intp n)
{
  double a, tmp;
  double *bufl, *bufr;
  npy_intp i, j, il, jr, stop1, stop2, stop3;
  npy_intp pp = p+1;
  int same_extremities = 0;

  *am = 0.0;
  *aM = 0.0;
  stop1 = 0;
  stop2 = 0;
  il = 0;
  jr = n-1;
  while ((stop1 == 0) || (stop2 == 0)) {

    same_extremities = 0;
    bufl = x + stride*il;
    bufr = x + stride*jr;
    if (*bufl > *bufr)
      SWAP(*bufl, *bufr)
    else if (*bufl == *bufr)
      same_extremities = 1;
    a = *bufl;

    if (il == jr) {
      *am = a;
      *aM = a;
      return;
    }

    bufl += stride;
    i = il + 1;
    j = jr;

    stop3 = 0;
    while (stop3 == 0) {

      while (*bufl < a) {
        i ++;
        bufl += stride;
      }
      while (*bufr > a) {
        j --;
        bufr -= stride;
      }
      if (j <= i)
        stop3 = 1;
      else {
        SWAP(*bufl, *bufr)
        j --;
        bufr -= stride;
        i ++;
        bufl += stride;
      }

      /* Avoids infinite loops in samples with redundant values */
      if ((same_extremities) && (j==jr)) {
        j --;
        bufr -= stride;
        SWAP(x[il*stride], *bufr)
        stop3 = 1;
      }
    }

    /* At this point, we know that there are (j+1) datapoints <=a
       including a itself, and another (n-j-1) datapoints >=a */
    if (j > pp)
      jr = j;
    else if (j < p)
      il = i;
    /* Case: found percentile at p */
    else if (j == p) {
      il = i;
      *am = a;
      stop1 = 1;
    }
    /* Case: found percentile at (p+1), ie j==(p+1) */
    else {
      jr = j;
      *aM = a;
      stop2 = 1;
    }
  }

  return;
}
nipy-0.6.1/nipy/algorithms/statistics/quantile.h

#ifndef QUANTILE
#define QUANTILE

#ifdef __cplusplus
extern "C" {
#endif

#include <Python.h>
#include <numpy/arrayobject.h>

extern double quantile(double* data,
                       npy_intp size,
                       npy_intp stride,
                       double r,
                       int interp);

#ifdef __cplusplus
}
#endif

#endif
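# A pure-Python reference (an added illustration, not part of the nipy
# source) for what quantile.c computes in its interpolated branch: with
# pp = r * (size - 1) and p = floor(pp), the result linearly blends the p-th
# and (p+1)-th order statistics.  The C routine gets the same answer with a
# partial quickselect instead of a full sort.
import numpy as np

def quantile_interp_reference(data, r):
    # Full sort for clarity; the C code only partially orders the data.
    xs = np.sort(np.asarray(data, dtype=np.float64))
    pp = r * (len(xs) - 1)
    p = int(np.floor(pp))
    wM = pp - p  # weight on the upper order statistic
    if wM <= 0:
        return xs[p]
    return (1.0 - wM) * xs[p] + wM * xs[p + 1]

data = [3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0]
# Matches numpy's default linear-interpolation percentile:
assert np.isclose(quantile_interp_reference(data, 0.5),
                  np.percentile(data, 50.0))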
nipy-0.6.1/nipy/algorithms/statistics/rft.py

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Random field theory routines

The theoretical results for the EC densities appearing in this module were
partially supported by NSF grant DMS-0405970.

Taylor, J.E. & Worsley, K.J. (2012). "Detecting sparse cone alternatives
for Gaussian random fields, with an application to fMRI". arXiv:1207.3840
[math.ST] and Statistica Sinica 23 (2013): 1629-1656.

Taylor, J.E. & Worsley, K.J. (2008). "Random fields of multivariate test
statistics, with applications to shape analysis." arXiv:0803.1708 [math.ST]
and Annals of Statistics 36 (2008): 1-27
"""

import numpy as np
from numpy.linalg import pinv
from scipy import stats

try:
    from scipy.misc import factorial
except ImportError:
    from scipy.special import factorial

from scipy.special import beta, gamma, gammaln, hermitenorm

# Legacy repr printing from numpy.


def binomial(n, k):
    """ Binomial coefficient

           n!
    c = ---------
        (n-k)! k!

    Parameters
    ----------
    n : float
        n of (n, k)
    k : float
        k of (n, k)

    Returns
    -------
    c : float

    Examples
    --------
    First 3 values of 4th row of Pascal triangle

    >>> [binomial(4, k) for k in range(3)]
    [1.0, 4.0, 6.0]
    """
    if n <= k or n == 0:
        return 0.
    elif k == 0:
        return 1.
    return 1./(beta(n-k+1, k+1)*(n+1))


def Q(dim, dfd=np.inf):
    r""" Q polynomial

    If `dfd` == inf (the default), then Q(dim) is the (dim-1)-st Hermite
    polynomial:

    .. math::

        H_j(x) = (-1)^j * e^{x^2/2} * (d^j/dx^j e^{-x^2/2})

    If `dfd` != inf, then it is the polynomial Q defined in [Worsley1994]_

    Parameters
    ----------
    dim : int
        dimension of polynomial
    dfd : scalar

    Returns
    -------
    q_poly : np.poly1d instance

    References
    ----------
    .. [Worsley1994] Worsley, K.J. (1994). 'Local maxima and the expected
       Euler characteristic of excursion sets of \chi^2, F and t fields.'
       Advances in Applied Probability, 26:13-42.
    """
    m = dfd
    j = dim
    if j <= 0:
        raise ValueError('Q defined only for dim > 0')
    coeffs = np.around(hermitenorm(j - 1).c)
    if np.isfinite(m):
        for L in range((j - 1) // 2 + 1):
            f = np.exp(gammaln((m + 1) / 2.)
                       - gammaln((m + 2 - j + 2 * L) / 2.)
                       - 0.5 * (j - 1 - 2 * L) * (np.log(m / 2.)))
            coeffs[2 * L] *= f
    return np.poly1d(coeffs)
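# Added commentary, not in the original module: with the default dfd=inf,
# ``Q`` above reduces to the probabilists' Hermite polynomial of degree
# dim - 1, so Q(1) == 1 and Q(3) == x**2 - 1.  A doctest-style check:
#
# >>> import numpy as np
# >>> from scipy.special import hermitenorm
# >>> np.allclose(Q(3).c, hermitenorm(2).c)
# True
#
# For finite dfd, the loop above rescales alternate coefficients by the
# Gamma-function factors of Worsley (1994).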
class ECquasi(np.poly1d):
    """ Polynomials with premultiplier

    A subclass of poly1d consisting of polynomials with a premultiplier of
    the form:

    (1 + x^2/m)^-exponent

    where m is a non-negative float (possibly infinity, in which case the
    function is a polynomial) and exponent is a non-negative multiple of 1/2.

    These arise often in the EC densities.

    Examples
    --------
    >>> import numpy
    >>> from nipy.algorithms.statistics.rft import ECquasi
    >>> x = numpy.linspace(0,1,101)

    >>> a = ECquasi([3,4,5])
    >>> a
    ECquasi(array([3, 4, 5]), m=inf, exponent=0.000000)
    >>> a(3) == 3*3**2 + 4*3 + 5
    True

    >>> b = ECquasi(a.coeffs, m=30, exponent=4)
    >>> numpy.allclose(b(x), a(x) * numpy.power(1+x**2/30, -4))
    True
    """
    def __init__(self, c_or_r, r=0, exponent=None, m=None):
        np.poly1d.__init__(self, c_or_r, r=r, variable='x')
        if exponent is None and not hasattr(self, 'exponent'):
            self.exponent = 0
        elif not hasattr(self, 'exponent'):
            self.exponent = exponent
        if m is None and not hasattr(self, 'm'):
            self.m = np.inf
        elif not hasattr(self, 'm'):
            self.m = m
        if not np.isfinite(self.m):
            self.exponent = 0.

    def denom_poly(self):
        """ Base of the premultiplier: (1+x^2/m).

        Examples
        --------
        >>> import numpy
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> d = b.denom_poly()
        >>> d
        poly1d([ 0.03333333,  0.        ,  1.        ])
        >>> numpy.allclose(d.c, [1./b.m,0,1])
        True
        """
        return np.poly1d([1./self.m, 0, 1])

    def change_exponent(self, _pow):
        """ Change exponent

        Multiply top and bottom by an integer multiple of the
        self.denom_poly.

        Examples
        --------
        >>> import numpy
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> x = numpy.linspace(0,1,101)
        >>> c = b.change_exponent(3)
        >>> c
        ECquasi(array([  1.11111111e-04,   1.48148148e-04,   1.07407407e-02,
                 1.33333333e-02,   3.66666667e-01,   4.00000000e-01,
                 5.00000000e+00,   4.00000000e+00,   2.00000000e+01]), m=30.000000, exponent=7.000000)
        >>> numpy.allclose(c(x), b(x))
        True
        """
        if np.isfinite(self.m):
            _denom_poly = self.denom_poly()
            if int(_pow) != _pow or _pow < 0:
                raise ValueError('expecting a non-negative integer')
            p = _denom_poly**int(_pow)
            exponent = self.exponent + _pow
            coeffs = np.polymul(self, p).coeffs
            return ECquasi(coeffs,
                           exponent=exponent,
                           m=self.m)
        else:
            return ECquasi(self.coeffs,
                           exponent=self.exponent,
                           m=self.m)

    def __setattr__(self, key, val):
        if key == 'exponent':
            if 2*float(val) % 1 == 0:
                self.__dict__[key] = float(val)
            else:
                raise ValueError(f'expecting multiple of a half, got {val:f}')
        elif key == 'm':
            if float(val) > 0 or val == np.inf:
                self.__dict__[key] = val
            else:
                raise ValueError('expecting positive float or inf')
        else:
            np.poly1d.__setattr__(self, key, val)

    def compatible(self, other):
        """ Check compatibility of degrees of freedom

        Check whether the degrees of freedom of two instances are equal
        so that they can be multiplied together.

        Examples
        --------
        >>> import numpy
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> x = numpy.linspace(0,1,101)
        >>> c = b.change_exponent(3)
        >>> b.compatible(c)
        True
        >>> d = ECquasi([3,4,20])
        >>> b.compatible(d)
        False
        >>>
        """
        if self.m != other.m:
            #raise ValueError, 'quasi polynomials are not compatible, m disagrees'
            return False
        return True

    def __add__(self, other):
        """ Add two compatible ECquasi instances together.

        Examples
        --------
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> c = ECquasi([1], m=30, exponent=4)
        >>> b+c #doctest: +FIX
        ECquasi(array([ 3,  4, 21]), m=30.000000, exponent=4.000000)

        >>> d = ECquasi([1], m=30, exponent=3)
        >>> b+d
        ECquasi(array([  3.03333333,   4.        ,  21.        ]), m=30.000000, exponent=4.000000)
        """
        if self.compatible(other):
            if np.isfinite(self.m):
                M = max(self.exponent, other.exponent)
                q1 = self.change_exponent(M-self.exponent)
                q2 = other.change_exponent(M-other.exponent)
                p = np.poly1d.__add__(q1, q2)
                return ECquasi(p.coeffs,
                               exponent=M,
                               m=self.m)
            else:
                p = np.poly1d.__add__(self, other)
                return ECquasi(p.coeffs,
                               exponent=0,
                               m=self.m)

    def __mul__(self, other):
        """ Multiply two compatible ECquasi instances together.

        Examples
        --------
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> c = ECquasi([1,2], m=30, exponent=4.5)
        >>> b*c
        ECquasi(array([ 3, 10, 28, 40]), m=30.000000, exponent=8.500000)
        """
        if np.isscalar(other):
            return ECquasi(self.coeffs * other,
                           m=self.m,
                           exponent=self.exponent)
        elif self.compatible(other):
            p = np.poly1d.__mul__(self, other)
            return ECquasi(p.coeffs,
                           exponent=self.exponent+other.exponent,
                           m=self.m)

    def __call__(self, val):
        """ Evaluate the ECquasi instance.

        Examples
        --------
        >>> import numpy
        >>> x = numpy.linspace(0,1,101)

        >>> a = ECquasi([3,4,5])
        >>> a
        ECquasi(array([3, 4, 5]), m=inf, exponent=0.000000)
        >>> a(3) == 3*3**2 + 4*3 + 5
        True

        >>> b = ECquasi(a.coeffs, m=30, exponent=4)
        >>> numpy.allclose(b(x), a(x) * numpy.power(1+x**2/30, -4))
        True
        """
        n = np.poly1d.__call__(self, val)
        _p = self.denom_poly()(val)
        return n / np.power(_p, self.exponent)
        Examples
        --------
        >>> import numpy
        >>> x = numpy.linspace(0,1,101)
        >>> a = ECquasi([3,4,5])
        >>> a
        ECquasi(array([3, 4, 5]), m=inf, exponent=0.000000)
        >>> a(3) == 3*3**2 + 4*3 + 5
        True

        >>> b = ECquasi(a.coeffs, m=30, exponent=4)
        >>> numpy.allclose(b(x), a(x) * numpy.power(1+x**2/30, -4))
        True
        """
        n = np.poly1d.__call__(self, val)
        _p = self.denom_poly()(val)
        return n / np.power(_p, self.exponent)

    def __truediv__(self, other):
        # Division of quasi-polynomials is not defined.
        raise NotImplementedError

    def __eq__(self, other):
        return (np.poly1d.__eq__(self, other) and
                self.m == other.m and
                self.exponent == other.exponent)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __pow__(self, _pow):
        """ Power of an ECquasi instance.

        Examples
        --------
        >>> b = ECquasi([3,4,5],m=10, exponent=3)
        >>> b**2
        ECquasi(array([ 9, 24, 46, 40, 25]), m=10.000000, exponent=6.000000)
        """
        p = np.poly1d.__pow__(self, int(_pow))
        q = ECquasi(p, m=self.m, exponent=_pow*self.exponent)
        return q

    def __sub__(self, other):
        """ Subtract `other` from `self`

        Parameters
        ----------
        other : ECquasi instance

        Returns
        -------
        subbed : ECquasi

        Examples
        --------
        >>> b = ECquasi([3,4,20], m=30, exponent=4)
        >>> c = ECquasi([1,2], m=30, exponent=4)
        >>> print(b-c) #doctest: +FIX
        ECquasi(array([ 3, 3, 18]), m=30.000000, exponent=4.000000)
        """
        return self + (other * -1)

    def __rsub__(self, other):
        # Reflected subtraction must compute ``other - self``, not
        # ``self - other``.
        return (self * -1) + other

    def __repr__(self):
        if not np.isfinite(self.m):
            m = repr(self.m)
        else:
            m = f'{self.m:f}'
        return f"ECquasi({self.coeffs!r}, m={m}, exponent={self.exponent:f})"

    __str__ = __repr__
    __rmul__ = __mul__
    __rtruediv__ = __truediv__

    def deriv(self, m=1):
        """ Evaluate derivative of ECquasi

        Parameters
        ----------
        m : int, optional

        Examples
        --------
        >>> a = ECquasi([3,4,5])
        >>> a.deriv(m=2) #doctest: +FIX
        ECquasi(array([6]), m=inf, exponent=0.000000)

        >>> b = ECquasi([3,4,5], m=10, exponent=3)
        >>> b.deriv()
        ECquasi(array([-1.2, -2. , 3. , 4. ]), m=10.000000, exponent=4.000000)
        """
        if m == 1:
            if np.isfinite(self.m):
                q1 = ECquasi(np.poly1d.deriv(self, m=1),
                             m=self.m,
                             exponent=self.exponent)
                q2 = ECquasi(np.poly1d.__mul__(self, self.denom_poly().deriv(m=1)),
                             m=self.m,
                             exponent=self.exponent+1)
                return q1 - self.exponent * q2
            else:
                return ECquasi(np.poly1d.deriv(self, m=1), m=np.inf, exponent=0)
        else:
            d = self.deriv(m=1)
            return d.deriv(m=m-1)


class fnsum:
    def __init__(self, *items):
        self.items = list(items)

    def __call__(self, x):
        v = 0
        for q in self.items:
            v += q(x)
        return v


class IntrinsicVolumes:
    """ Compute intrinsic volumes of products of sets

    A simple class that exists only to compute the intrinsic volumes of
    products of sets (that themselves have intrinsic volumes, of course).
    """
    def __init__(self, mu=[1]):
        if isinstance(mu, IntrinsicVolumes):
            mu = mu.mu
        self.mu = np.asarray(mu, np.float64)
        self.order = self.mu.shape[0]-1

    def __str__(self):
        return str(self.mu)

    def __mul__(self, other):
        if not isinstance(other, IntrinsicVolumes):
            raise ValueError('expecting an IntrinsicVolumes instance')
        order = self.order + other.order + 1
        mu = np.zeros(order)
        for i in range(order):
            for j in range(i+1):
                # Indices off the end of either mu vector contribute nothing.
                try:
                    mu[i] += self.mu[j] * other.mu[i-j]
                except IndexError:
                    pass
        return self.__class__(mu)


class ECcone(IntrinsicVolumes):
    """ EC approximation to supremum distribution of var==1 Gaussian process

    A class that takes the intrinsic volumes of a set and gives the EC
    approximation to the supremum distribution of a unit variance Gaussian
    process with these intrinsic volumes. This is the basic building block of
    all of the EC densities.
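    Calling an instance returns the expected Euler characteristic of the
    excursion set above a threshold: a sum of EC densities weighted by the
    intrinsic volumes of the search region, plus a tail probability term for
    the zero-dimensional contribution.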
If product is not None, then this product (an instance of IntrinsicVolumes) will effectively be prepended to the search region in any call, but it will also affect the (quasi-)polynomial part of the EC density. For instance, Hotelling's T^2 random field has a sphere as product, as does Roy's maximum root. """ def __init__(self, mu=[1], dfd=np.inf, search=[1], product=[1]): self.dfd = dfd IntrinsicVolumes.__init__(self, mu=mu) self.product = IntrinsicVolumes(product) self.search = IntrinsicVolumes(search) def __call__(self, x, search=None): """ Get expected EC for a search region Default is self.search which itself defaults to [1] giving the survival function. """ x = np.asarray(x, np.float64) if search is None: search = self.search else: search = IntrinsicVolumes(search) search *= self.product if np.isfinite(self.dfd): q_even = ECquasi([0], m=self.dfd, exponent=0) q_odd = ECquasi([0], m=self.dfd, exponent=0.5) else: q_even = np.poly1d([0]) q_odd = np.poly1d([0]) for k in range(search.mu.shape[0]): q = self.quasi(k) c = float(search.mu[k]) * np.power(2*np.pi, -(k+1)/2.) if np.isfinite(self.dfd): q_even += q[0] * c q_odd += q[1] * c else: q_even += q * c _rho = q_even(x) + q_odd(x) if np.isfinite(self.dfd): _rho *= np.power(1 + x**2/self.dfd, -(self.dfd-1)/2.) else: _rho *= np.exp(-x**2/2.) if search.mu[0] * self.mu[0] != 0.: # tail probability is not "quasi-polynomial" if not np.isfinite(self.dfd): P = stats.norm.sf else: P = lambda x: stats.t.sf(x, self.dfd) _rho += P(x) * search.mu[0] * self.mu[0] return _rho def pvalue(self, x, search=None): return self(x, search=search) def integ(self, m=None, k=None): raise NotImplementedError # this could be done with stats.t, # at least m=1 def density(self, x, dim): """ The EC density in dimension `dim`. """ return self(x, search=[0]*dim+[1]) def _quasi_polynomials(self, dim): """ list of quasi-polynomials for EC density calculation. """ c = self.mu / np.power(2*np.pi, np.arange(self.order+1.)/2.) quasi_polynomials = [] for k in range(c.shape[0]): if k+dim > 0: _q = ECquasi(Q(k+dim, dfd=self.dfd), m=self.dfd, exponent=k/2.) _q *= float(c[k]) quasi_polynomials.append(_q) return quasi_polynomials def quasi(self, dim): r""" (Quasi-)polynomial parts of EC density in dimension `dim` - ignoring a factor of (2\pi)^{-(dim+1)/2} in front. """ q_even = ECquasi([0], m=self.dfd, exponent=0) q_odd = ECquasi([0], m=self.dfd, exponent=0.5) quasi_polynomials = self._quasi_polynomials(dim) for k in range(len(quasi_polynomials)): _q = quasi_polynomials[k] if _q.exponent % 1 == 0: q_even += _q else: q_odd += _q if not np.isfinite(self.dfd): q_even += q_odd return np.poly1d(q_even.coeffs) else: return (q_even, q_odd) Gaussian = ECcone def mu_sphere(n, j, r=1): """ `j`th curvature for `n` dimensional sphere radius `r` Return mu_j(S_r(R^n)), the j-th Lipschitz Killing curvature of the sphere of radius r in R^n. From Chapter 6 of Adler & Taylor, 'Random Fields and Geometry'. 2006. """ if j < n: if n-1 == j: return 2 * np.power(np.pi, n/2.) * np.power(r, n-1) / gamma(n/2.) if (n-1-j)%2 == 0: return 2 * binomial(n-1, j) * mu_sphere(n,n-1) * np.power(r, j) / mu_sphere(n-j,n-j-1) else: return 0 else: return 0 def mu_ball(n, j, r=1): """ `j`th curvature of `n`-dimensional ball radius `r` Return mu_j(B_n(r)), the j-th Lipschitz Killing curvature of the ball of radius r in R^n. """ if j <= n: if n == j: return np.power(np.pi, n/2.) * np.power(r, n) / gamma(n/2. + 1.) 
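            # For j == n this is the ordinary Lebesgue volume of the n-ball:
            # pi^(n/2) * r^n / Gamma(n/2 + 1).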
else: return binomial(n, j) * np.power(r, j) * mu_ball(n,n) / mu_ball(n-j,n-j) else: return 0 def spherical_search(n, r=1): """ A spherical search region of radius r. """ return IntrinsicVolumes([mu_sphere(n,j,r=r) for j in range(n)]) def ball_search(n, r=1): """ A ball-shaped search region of radius r. """ return IntrinsicVolumes([mu_ball(n,j,r=r) for j in range(n+1)]) def volume2ball(vol, d=3): """ Approximate volume with ball Approximate intrinsic volumes of a set with a given volume by those of a ball with a given dimension and equal volume. """ if d > 0: r = np.power(vol * 1. / mu_ball(d, d), 1./d) return ball_search(d, r=r) else: return IntrinsicVolumes([1]) class ChiSquared(ECcone): """ EC densities for a Chi-Squared(n) random field. """ def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x), search=search) class TStat(ECcone): """ EC densities for a t random field. """ def __init__(self, dfd=np.inf, search=[1]): ECcone.__init__(self, mu=[1], dfd=dfd, search=search) class FStat(ECcone): """ EC densities for a F random field. """ def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) class Roy(ECcone): """ Roy's maximum root Maximize an F_{dfd,dfn} statistic over a sphere of dimension k. """ def __init__(self, dfn=1, dfd=np.inf, k=1, search=[1]): product = spherical_search(k) self.k = k self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd, product=product) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) class MultilinearForm(ECcone): """ Maximize a multivariate Gaussian form Maximized over spheres of dimension dims. See: Kuri, S. & Takemura, A. (2001). 'Tail probabilities of the maxima of multilinear forms and their applications.' Ann. Statist. 29(2): 328-371. """ def __init__(self, *dims, **keywords): product = IntrinsicVolumes([1]) search = keywords.pop('search', [1]) for d in dims: product *= spherical_search(d) product.mu /= 2.**(len(dims)-1) ECcone.__init__(self, search=search, product=product) class Hotelling(ECcone): """ Hotelling's T^2 Maximize an F_{1,dfd}=T_dfd^2 statistic over a sphere of dimension `k`. """ def __init__(self, dfd=np.inf, k=1, search=[1]): product = spherical_search(k) self.k = k ECcone.__init__(self, mu=[1], search=search, dfd=dfd, product=product) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x), search=search) class OneSidedF(ECcone): """ EC densities for one-sided F statistic See: Worsley, K.J. & Taylor, J.E. (2005). 'Detecting fMRI activation allowing for unknown latency of the hemodynamic response.' Neuroimage, 29,649-654. 
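    The density is computed (see ``__call__`` below) as half the difference
    of two F EC densities, with numerator degrees of freedom `dfn` and
    `dfn` - 1.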
""" def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn self.regions = [spherical_search(dfn), spherical_search(dfn-1)] ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): IntrinsicVolumes.__init__(self, self.regions[0]) d1 = ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) IntrinsicVolumes.__init__(self, self.regions[1]) d2 = ECcone.__call__(self, np.sqrt(x * (self.dfn-1)), search=search) self.mu = self.regions[0].mu return (d1 - d2) * 0.5 class ChiBarSquared(ChiSquared): def _getmu(self): x = np.linspace(0, 2 * self.dfn, 100) sf = 0. g = Gaussian() for i in range(1, self.dfn+1): sf += binomial(self.dfn, i) * stats.chi.sf(x, i) / np.power(2., self.dfn) d = np.array([g.density(np.sqrt(x), j) for j in range(self.dfn)]) c = np.dot(pinv(d.T), sf) sf += 1. / np.power(2, self.dfn) self.mu = IntrinsicVolumes(c) def __init__(self, dfn=1, search=[1]): ChiSquared.__init__(self, dfn=dfn, search=search) self._getmu() def __call__(self, x, dim=0, search=[1]): if search is None: search = self.stat else: search = IntrinsicVolumes(search) * self.stat return FStat.__call__(self, x, dim=dim, search=search) def scale_space(region, interval, kappa=1.): """ scale space intrinsic volumes of region x interval See: Siegmund, D.O and Worsley, K.J. (1995). 'Testing for a signal with unknown location and scale in a stationary Gaussian random field.' Annals of Statistics, 23:608-639. and Taylor, J.E. & Worsley, K.J. (2005). 'Random fields of multivariate test statistics, with applications to shape analysis and fMRI.' (available on http://www.math.mcgill.ca/keith """ w1, w2 = interval region = IntrinsicVolumes(region) D = region.order out = np.zeros((D+2,)) out[0] = region.mu[0] for i in range(1, D+2): if i < D+1: out[i] = (1./w1 + 1./w2) * region.mu[i] * 0.5 for j in range(int(np.floor((D-i+1)/2.)+1)): denom = (i + 2*j - 1.) # w^-i/i when i=0 # according to Keith Worsley the 2005 paper has a typo if denom == 0: f = np.log(w2/w1) else: f = (w1**(-i-2*j+1) - w2**(-i-2*j+1)) / denom f *= kappa**((1-2*j)/2.) * (-1)**j * factorial(int(denom)) f /= (1 - 2*j) * (4*np.pi)**j * factorial(j) * factorial(i-1) out[i] += region.mu[int(denom)] * f return IntrinsicVolumes(out) nipy-0.6.1/nipy/algorithms/statistics/tests/000077500000000000000000000000001470056100100211535ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/statistics/tests/__init__.py000066400000000000000000000003421470056100100232630ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipy.algorithms.statistics.tests.test_intrinsic_volumes import nipy.algorithms.statistics.tests.test_rft nipy-0.6.1/nipy/algorithms/statistics/tests/test_empirical_pvalue.py000066400000000000000000000034271470056100100261130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the empirical null estimator. 
""" import numpy as np from ..empirical_pvalue import ( NormalEmpiricalNull, fdr, fdr_threshold, gaussian_fdr, gaussian_fdr_threshold, smoothed_histogram_from_samples, ) def test_efdr(): # generate the data n = 100000 x = np.random.randn(n) x[:3000] += 3 # make the tests efdr = NormalEmpiricalNull(x) np.testing.assert_array_less(efdr.fdr(3.0), 0.2) np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -2.8) np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -2.5) def test_smooth_histo(): n = 100 x = np.random.randn(n) h, c = smoothed_histogram_from_samples(x, normalized=True) thh = 1. / np.sqrt(2 * np.pi) hm = h.max() assert np.absolute(hm - thh) < 0.15 def test_fdr_pos(): # test with some significant values np.random.seed([1]) x = np.random.rand(100) x[:10] *= (.05 / 10) q = fdr(x) assert (q[:10] < .05).all() pc = fdr_threshold(x) assert (pc > .0025) & (pc < .1) def test_fdr_neg(): # test without some significant values np.random.seed([1]) x = np.random.rand(100) * .8 + .2 q =fdr(x) assert (q > .05).all() pc = fdr_threshold(x) assert pc == .05 / 100 def test_gaussian_fdr(): # Test that fdr works on Gaussian data np.random.seed([2]) x = np.random.randn(100) * 2 fdr = gaussian_fdr(x) assert fdr.min() < .05 assert fdr.max() > .99 def test_gaussian_fdr_threshold(): np.random.seed([2]) x = np.random.randn(100) * 2 ac = gaussian_fdr_threshold(x) assert ac > 2.0 assert ac < 4.0 assert ac > gaussian_fdr_threshold(x, alpha=.1) nipy-0.6.1/nipy/algorithms/statistics/tests/test_histogram.py000066400000000000000000000005431470056100100245630ustar00rootroot00000000000000import numpy as np from numpy.testing import assert_array_equal from ..histogram import histogram def test_histogram(): x = np.array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4], dtype='uintp') h = histogram(x) assert_array_equal(h, [1, 2, 3, 4, 5]) nipy-0.6.1/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py000066400000000000000000000305021470056100100263400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from itertools import chain, combinations import numpy as np import numpy.linalg as npl import pytest from numpy.testing import assert_almost_equal, assert_array_equal from nipy.utils import SCTYPES from .. import intvol def symnormal(p=10): M = np.random.standard_normal((p,p)) return (M + M.T) / np.sqrt(2) def randorth(p=10): """ A random orthogonal matrix. """ A = symnormal(p) return npl.eig(A)[1] def box(shape, edges): data = np.zeros(shape) sl = [slice(edges[i][0], edges[i][1],1) for i in range(len(shape))] data[tuple(sl)] = 1 return data.astype(np.int_) def randombox(shape): """ Generate a random box, returning the box and the edge lengths """ edges = [np.random.randint(0, shape[j] + 1, size=(2,)) for j in range(len(shape))] for j in range(len(shape)): edges[j].sort() if edges[j][0] == edges[j][1]: edges[j][0] = 0; edges[j][1] = shape[j]/2+1 return edges, box(shape, edges) def elsym(edgelen, order=1): """ Elementary symmetric polynomial of a given order """ l = len(edgelen) if order == 0: return 1 r = 0 for v in combinations(range(l), order): r += np.prod([edgelen[vv] for vv in v]) return r def nonintersecting_boxes(shape): """ The Lips's are supposed to be additive, so disjoint things should be additive. But, if they ALMOST intersect, different things get added to the triangulation. 
>>> b1 = np.zeros(40, np.int_) >>> b1[:11] = 1 >>> b2 = np.zeros(40, np.int_) >>> b2[11:] = 1 >>> (b1*b2).sum() 0 >>> c = np.indices((40,)).astype(np.float64) >>> intvol.Lips1d(c, b1) array([ 1., 10.]) >>> intvol.Lips1d(c, b2) array([ 1., 28.]) >>> intvol.Lips1d(c, b1+b2) array([ 1., 39.]) The function creates two boxes such that the 'dilated' box1 does not intersect with box2. Additivity works in this case. """ while True: edge1, box1 = randombox(shape) edge2, box2 = randombox(shape) diledge1 = [[max(ed[0]-1, 0), min(ed[1]+1, sh)] for ed, sh in zip(edge1, box1.shape)] dilbox1 = box(box1.shape, diledge1) if set(np.unique(dilbox1 + box2)).issubset([0,1]): break return box1, box2, edge1, edge2 def pts2dots(d, a, b, c): """ Convert point coordinates to dot products """ D00 = np.dot(d, d) D01 = np.dot(d, a) D02 = np.dot(d, b) D03 = np.dot(d, c) D11 = np.dot(a, a) D12 = np.dot(a, b) D13 = np.dot(a, c) D22 = np.dot(b, b) D23 = np.dot(b, c) D33 = np.dot(c, c) return D00, D01, D02, D03, D11, D12, D13, D22, D23, D33 def pts2mu3_tet(d, a, b, c): """ Accept point coordinates for calling mu3tet """ return intvol.mu3_tet(*pts2dots(d, a, b, c)) def wiki_tet_vol(d, a, b, c): # Wikipedia formula for generalized tetrahedron volume d, a, b, c = (np.array(e) for e in (d, a, b, c)) cp = np.cross((b-d),(c-d)) v2t6 = np.dot((a-d), cp) return np.sqrt(v2t6) / 6. def test_mu3tet(): assert intvol.mu3_tet(0,0,0,0,1,0,0,1,0,1) == 1./6 assert intvol.mu3_tet(0,0,0,0,0,0,0,0,0,0) == 0 d = [2,2,2] a = [3,2,2] b = [2,3,2] c = [2,2,3] assert pts2mu3_tet(d, a, b, c) == 1./6 assert wiki_tet_vol(d, a, b, c) == 1./6 # This used to generate nan values assert intvol.mu3_tet(0,0,0,0,1,0,0,-1,0,1) == 0 def test_mu2tri(): assert intvol.mu2_tri(0,0,0,1,0,1) == 1./2 def test_mu1tri(): assert intvol.mu1_tri(0,0,0,1,0,1) == 1+np.sqrt(2)/2 def test_mu2tet(): # 15 digit precision error found on 32-bit Linux # https://travis-ci.org/MacPython/nipy-wheels/jobs/140268248#L725 assert_almost_equal(intvol.mu2_tet(0,0,0,0,1,0,0,1,0,1), (3./2 + np.sqrt(3./4))/2, 15) def pts2mu1_tet(d, a, b, c): """ Accept point coordinates for calling mu1_tet """ return intvol.mu1_tet(*pts2dots(d, a, b, c)) def test_mu1_tet(): res1 = pts2mu1_tet([2,2,2],[3,2,2],[2,3,2],[2,2,3]) res2 = pts2mu1_tet([0,0,0],[1,0,0],[0,1,0],[0,0,1]) assert res1 == res2 assert intvol.mu1_tet(0,0,0,0,0,0,0,0,0,0) == 0 # This used to generate nan values assert intvol.mu1_tet(0,0,0,0,1,0,0,-1,0,1) == 0 def test__mu1_tetface(): # Test for out of range acos value sequences. I'm ashamed to say I found # these sequences accidentally in a failing test with random numbers _mu1_tetface = intvol._mu1_tetface assert_almost_equal(_mu1_tetface(1, 0, 0, 10, 10, 0, 0, 20, 20, 40), 0) assert_almost_equal(_mu1_tetface(36, 0, 0, 18, 48, 0, 0, 1, 30, 63), 3) D_TO_FUNCS = {1: (intvol.Lips1d, intvol.EC1d), 2: (intvol.Lips2d, intvol.EC2d), 3: (intvol.Lips3d, intvol.EC3d)} def test_ec(): for i in range(1, 4): _, box1 = randombox((40,) * i) f = D_TO_FUNCS[i][1] assert_almost_equal(f(box1), 1) # While we're here, test we can use different dtypes, and that values # other than 0 or 1 raise an error. 
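        # SCTYPES maps a dtype kind name ('int', 'uint', 'float', ...) to the
        # corresponding list of concrete numpy scalar types.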
for dtt in chain.from_iterable(SCTYPES[t] for t in ('int', 'uint', 'float')): box1_again = box1.copy().astype(dtt) assert_almost_equal(f(box1_again), 1) box1_again[(10,) * i] = 2 pytest.raises(ValueError, f, box1_again) def test_ec_disjoint(): for i in range(1, 4): e = D_TO_FUNCS[i][1] box1, box2, _, _ = nonintersecting_boxes((40,)*i) assert_almost_equal(e(box1 + box2), e(box1) + e(box2)) def test_lips_wrapping(): # Test that shapes touching the edge do not combine by wrapping b1 = np.zeros(40, np.int_) b1[:11] = 1 b2 = np.zeros(40, np.int_) b2[11:] = 1 # lines are disjoint assert (b1*b2).sum() == 0 c = np.indices(b1.shape).astype(np.float64) assert_array_equal(intvol.Lips1d(c, b1), (1, 10)) assert_array_equal(intvol.Lips1d(c, b2), (1, 28)) assert_array_equal(intvol.Lips1d(c, b1+b2), (1, 39.0)) # 2D b1 = b1[:,None] b2 = b2[:,None] # boxes are disjoint assert (b1*b2).sum() == 0 c = np.indices(b1.shape).astype(np.float64) assert_array_equal(intvol.Lips2d(c, b1), (1, 10, 0)) assert_array_equal(intvol.Lips2d(c, b2), (1, 28, 0)) assert_array_equal(intvol.Lips2d(c, b1+b2), (1, 39.0, 0)) # 3D b1 = b1[:,:,None] b2 = b2[:,:,None] assert b1.shape == (40,1,1) # boxes are disjoint assert (b1*b2).sum() == 0 c = np.indices(b1.shape).astype(np.float64) assert_array_equal(intvol.Lips3d(c, b1), (1, 10, 0, 0)) assert_array_equal(intvol.Lips3d(c, b2), (1, 28, 0, 0)) assert_array_equal(intvol.Lips3d(c, b1+b2), (1, 39.0, 0, 0)) # Shapes which are squeezable should still return sensible answers # Test simple ones line / box / volume for box_shape, exp_ivs in [[(10,),(1,9)], [(10,1),(1,9,0)], [(1,10),(1,9,0)], [(10,1,1), (1,9,0,0)], [(1, 10, 1), (1,9,0,0)], [(1, 1, 10), (1,9,0,0)]]: nd = len(box_shape) lips_func, ec_func = D_TO_FUNCS[nd] c = np.indices(box_shape).astype(np.float64) b = np.ones(box_shape, dtype=np.int_) assert_array_equal(lips_func(c, b), exp_ivs) assert ec_func(b) == exp_ivs[0] def test_lips1_disjoint(): phi = intvol.Lips1d box1, box2, edge1, edge2 = nonintersecting_boxes((30,)) c = np.indices((30,)).astype(np.float64) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,)+(30,)) # Test rotation causes no change in volumes U = randorth(p=6)[:1] e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal(phi(e, box1 + box2), (np.array( [elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(2)]) + np.array( [elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(2)]))) pytest.raises(ValueError, phi, c[...,None], box1) def test_lips2_disjoint(): phi = intvol.Lips2d box1, box2, edge1, edge2 = nonintersecting_boxes((40,40)) c = np.indices((40,40)).astype(np.float64) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,40,40)) # Test rotation causes no change in volumes U = randorth(p=6)[0:2] e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal(phi(e, box1 + box2), np.array([elsym([e[1]-e[0]-1 for e 
in edge1], i) for i in range(3)]) + np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(3)]) ) pytest.raises(ValueError, phi, c[...,None], box1) pytest.raises(ValueError, phi, c[:,:,1], box1) def test_lips3_disjoint(): phi = intvol.Lips3d box1, box2, edge1, edge2 = nonintersecting_boxes((40,)*3) c = np.indices((40,)*3).astype(np.float64) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,40,40,40)) # Test rotation causes no change in volumes U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal( phi(e, box1 + box2), (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(4)]) + np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(4)]))) pytest.raises(ValueError, phi, c[...,None], box1) pytest.raises(ValueError, phi, c[:,:,:,1], box1) def test_lips3_nans(): # These boxes caused nans in the Lips3 disjoint box tests phi = intvol.Lips3d box1 = np.zeros((40,40,40), dtype=np.int_) box2 = box1.copy() box1[23:30,22:32,9:13] = 1 box2[7:22,0,8:17] = 1 c = np.indices(box1.shape).astype(np.float64) assert_array_equal(np.isnan(phi(c, box2)), False) U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_array_equal(np.isnan(phi(e, box1 + box2)), False) def test_slices(): # Slices have EC 1... e = intvol.EC3d p = intvol.Lips3d m = np.zeros((40,)*3, np.int_) D = np.indices(m.shape).astype(np.float64) m[10,10,10] = 1 assert_almost_equal(e(m), 1) assert_almost_equal(p(D,m), [1,0,0,0]) m = np.zeros((40,)*3, np.int_) m[10,10:14,10] = 1 assert_almost_equal(e(m), 1) assert_almost_equal(p(D,m), [1,3,0,0]) m = np.zeros((40,)*3, np.int_) m[10,10:14,9:15] = 1 assert_almost_equal(e(m), 1) assert_almost_equal(p(D,m), [1,8,15,0]) def test_ec_wrapping(): # Test wrapping for EC1 calculation assert intvol.EC1d(np.ones((6,), dtype=np.int_)) == 1 box1 = np.array([1, 1, 0, 1, 1, 1], dtype=np.int_) assert intvol.EC1d(box1) == 2 # 2D box1 = np.zeros((3,6), dtype=np.int_) box1[1] = 1 assert intvol.EC2d(box1) == 1 box1[1, 3] = 0 assert intvol.EC2d(box1) == 2 # 3D box1 = np.zeros((3,6,3), dtype=np.int_) box1[1, :, 1] = 1 assert intvol.EC3d(box1) == 1 box1[1, 3, 1] = 0 assert intvol.EC3d(box1) == 2 nipy-0.6.1/nipy/algorithms/statistics/tests/test_mixed_effects.py000066400000000000000000000122321470056100100253710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing the glm module """ import numpy as np import numpy.random as nr import pytest from numpy.testing import assert_almost_equal, assert_array_almost_equal from ..bayesian_mixed_effects import two_level_glm from ..mixed_effects_stat import ( generate_data, mfx_stat, one_sample_ftest, one_sample_ttest, t_stat, two_sample_ftest, two_sample_ttest, ) def test_mfx(): """ Test the generic mixed-effects model""" n_samples, n_tests = 20, 100 np.random.seed(1) # generate some data V1 = np.random.rand(n_samples, n_tests) Y = generate_data(np.ones((n_samples, 1)), 0, 1, V1) X = np.random.randn(20, 3) # compute the test statistics t1, = mfx_stat(Y, V1, X, 1,return_t=True, return_f=False, 
return_effect=False, return_var=False)
    assert t1.shape == (n_tests,)
    assert t1.mean() < 5 / np.sqrt(n_tests)
    assert (t1.var() < 2) and (t1.var() > .5)
    t2, = mfx_stat(Y, V1, X * np.random.rand(3), 1)
    assert_almost_equal(t1, t2)
    f, = mfx_stat(Y, V1, X, 1, return_t=False, return_f=True)
    assert_almost_equal(t1 ** 2, f)
    v2, = mfx_stat(Y, V1, X, 1, return_t=False, return_var=True)
    assert (v2 > 0).all()
    fx, = mfx_stat(Y, V1, X, 1, return_t=False, return_effect=True)
    assert fx.shape == (n_tests,)


def test_t_test():
    """ Test that the t test runs """
    n_samples, n_tests = 15, 100
    data = nr.randn(n_samples, n_tests)
    t = t_stat(data)
    assert t.shape == (n_tests,)
    assert np.abs(t.mean()) < 5 / np.sqrt(n_tests)
    assert t.var() < 2
    assert t.var() > .5


def test_two_sample_ttest():
    """ test that the mfx ttest indeed runs """
    n_samples, n_tests = 15, 4
    np.random.seed(1)
    # generate some data
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones(n_samples), 0, 1, vardata)
    # compute the test statistics
    u = np.concatenate((np.ones(5), np.zeros(10)))
    t2 = two_sample_ttest(data, vardata, u, n_iter=5)
    assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # try verbose mode
    t3 = two_sample_ttest(data, vardata, u, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)


def test_two_sample_ftest():
    """ test that the mfx ftest indeed runs """
    n_samples, n_tests = 15, 4
    np.random.seed(1)
    # generate some data
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
    # compute the test statistics
    u = np.concatenate((np.ones(5), np.zeros(10)))
    t2 = two_sample_ftest(data, vardata, u, n_iter=5)
    assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # try verbose mode
    t3 = two_sample_ftest(data, vardata, u, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)


def test_mfx_ttest():
    """ test that the mfx ttest indeed runs """
    n_samples, n_tests = 15, 100
    np.random.seed(1)
    # generate some data
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
    # compute the test statistics
    t2 = one_sample_ttest(data, vardata, n_iter=5)
    assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # try verbose mode
    t3 = one_sample_ttest(data, vardata, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)


def test_mfx_ftest():
    """ test that the mfx ftest indeed runs """
    n_samples, n_tests = 15, 100
    np.random.seed(1)
    # generate some data
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
    # compute the test statistics
    f = one_sample_ftest(data, vardata, n_iter=5)
    assert f.shape == (n_tests,)
    assert (np.abs(f.mean() - 1) < 1)
    assert f.var() < 10
    assert f.var() > .2


def test_two_level_glm():
    nsub = 10
    npts = 100
    reg1 = np.ones(nsub)
    reg2 = np.random.random(nsub)
    X = np.array((reg1, reg2)).T
    y = np.repeat(np.reshape(reg1 + reg2, (nsub, 1)), npts, axis=1)
    vy = np.zeros((nsub, npts))
    beta, s2, dof = two_level_glm(y, vy, X)
    assert_array_almost_equal(beta, 1)
    assert_array_almost_equal(s2, 0)


def test_two_level_glm_novar():
    X = np.random.normal(0, 1, size=(100, 10))
    y = np.random.normal(0, 1, size=(100, 50))
    vy = np.zeros((100, 50))
    beta, s2, dof = two_level_glm(y, vy, X)
    beta_error = np.mean(beta ** 2)
    s2_error = np.abs(np.mean(s2) - 1)
    print(f'Errors: {beta_error:f} (beta), {s2_error:f} (s2)')
    assert beta_error <
0.1 assert s2_error < 0.1 def test_two_level_glm_error(): # this tests whether two_level_glm raises a value error if the # design matrix has more regressors than the number of # observations X = np.random.normal(0, 1, size=(10, 11)) y = np.random.normal(0, 1, size=(10, 5)) vy = np.zeros((10, 5)) pytest.raises(ValueError, two_level_glm, y, vy, X) nipy-0.6.1/nipy/algorithms/statistics/tests/test_onesample.py000066400000000000000000000025011470056100100245450ustar00rootroot00000000000000 import numpy as np from scipy.stats import norm from nipy.algorithms.statistics import onesample from nipy.testing import assert_almost_equal def test_estimate_varatio(p=1.0e-04, sigma2=1): # This is a random test, but is design to fail only rarely.... ntrial = 300 n = 10 random = np.zeros(10) rsd = np.zeros(n) sd = np.multiply.outer( np.linspace(0,1,40), np.ones(ntrial) ) + np.ones((40,ntrial)) for i in range(n): Y = np.random.standard_normal((40,ntrial)) * np.sqrt(sd**2 + sigma2) results = onesample.estimate_varatio(Y, sd) results = onesample.estimate_varatio(Y, sd) random[i] = results['random'].mean() rsd[i] = results['random'].std() # Compute the mean just to be sure it works W = 1. / (sd**2 + results['random']) mu = onesample.estimate_mean(Y, np.sqrt(sd**2 + results['random']))['effect'] assert_almost_equal(mu, (W*Y).sum(0) / W.sum(0)) rsd = np.sqrt((rsd**2).mean() / ntrial) T = np.fabs((random.mean() - sigma2) / (rsd / np.sqrt(n))) # should fail one in every 1/p trials at least for sigma > 0, # small values of sigma seem to have some bias if T > norm.ppf(1-p/2): raise ValueError('large T value, but algorithm works, ' 'could be a statistical failure') nipy-0.6.1/nipy/algorithms/statistics/tests/test_quantile.py000066400000000000000000000025201470056100100244050ustar00rootroot00000000000000""" Test quartile functions """ from itertools import chain import numpy as np from numpy import median as np_median from numpy.testing import assert_array_almost_equal, assert_array_equal from scipy.stats import scoreatpercentile as sp_percentile from nipy.utils import SCTYPES from .._quantile import _median, _quantile NUMERIC_TYPES = list( chain.from_iterable( SCTYPES[t] for t in ("int", "uint", "float", "complex") ) ) def another_percentile(arr, pct, axis): # numpy.percentile not available until after numpy 1.4.1 return np.apply_along_axis(sp_percentile, axis, arr.astype(float), pct) def test_median(): for dtype in NUMERIC_TYPES: for shape in ((10,), (10, 11), (10, 11, 12)): X = (100 * (np.random.random(shape) - .5)).astype(dtype) for a in range(X.ndim): assert_array_equal(_median(X, axis=a).squeeze(), np_median(X.astype(np.float64), axis=a)) def test_quantile(): for dtype in NUMERIC_TYPES: for shape in ((10,), (10, 11), (10, 11, 12)): X = (100 * (np.random.random(shape) - .5)).astype(dtype) for a in range(X.ndim): assert_array_almost_equal( _quantile(X, .75, axis=a, interp=True).squeeze(), another_percentile(X, 75, axis=a)) nipy-0.6.1/nipy/algorithms/statistics/tests/test_rft.py000066400000000000000000000361621470056100100233670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats from scipy.special import gammaln, hermitenorm try: from scipy.misc import factorial except ImportError: from scipy.special import factorial import pytest from numpy.testing import assert_almost_equal from .. import rft #def rho(x, dim, df=np.inf): # """ # EC densities for T and Gaussian (df=inf) random fields. 
# """ # # m = df # # if dim > 0: # x = np.asarray(x, np.float64) #--jarrod: shouldn't Q be rft.Q?? # q = Q(dim, dfd=df)(x) # # if np.isfinite(m): # q *= np.power(1 + x**2/m, -(m-1)/2.) # else: # q *= np.exp(-x**2/2) # # return q * np.power(2*np.pi, -(dim+1)/2.) # else: # if np.isfinite(m): # return scipy.stats.t.sf(x, df) # else: # return scipy.stats.norm.sf(x) def test_Q(): pytest.raises(ValueError, rft.Q, -1) pytest.raises(ValueError, rft.Q, 0) x = np.arange(-9, 10) for dim in range(1, 4): res = rft.Q(dim) assert_almost_equal(res(x), hermitenorm(dim - 1)(x)) def K(dim=4, dfn=7, dfd=np.inf): r""" Determine the polynomial K in: Worsley, K.J. (1994). 'Local maxima and the expected Euler characteristic of excursion sets of \chi^2, F and t fields.' Advances in Applied Probability, 26:13-42. If dfd=inf, return the limiting polynomial. """ def lbinom(n, j): return gammaln(n+1) - gammaln(j+1) - gammaln(n-j+1) m = dfd n = dfn D = dim k = np.arange(D) coef = 0 for j in range(int(np.floor((D-1)/2.)+1)): if np.isfinite(m): t = (gammaln((m+n-D)/2.+j) - gammaln(j+1) - gammaln((m+n-D)/2.)) t += lbinom(m-1, k-j) - k * np.log(m) else: _t = np.power(2., -j) / (factorial(k-j) * factorial(j)) t = np.log(_t) t[np.isinf(_t)] = -np.inf t += lbinom(n-1, D-1-j-k) coef += (-1)**(D-1) * factorial(D-1) * np.exp(t) * np.power(-1.*n, k) return np.poly1d(coef[::-1]) def F(x, dim, dfd=np.inf, dfn=1): """ EC densities for F and Chi^2 (dfd=inf) random fields. """ m = float(dfd) n = float(dfn) D = float(dim) if dim > 0: x = np.asarray(x, np.float64) k = K(dim=dim, dfd=dfd, dfn=dfn)(x) if np.isfinite(m): f = x*n/m t = -np.log(1 + f) * (m+n-2.) / 2. t += np.log(f) * (n-D) / 2. t += gammaln((m+n-D)/2.) - gammaln(m/2.) else: f = x*n t = np.log(f/2.) * (n-D) / 2. - f/2. t -= np.log(2*np.pi) * D / 2. + np.log(2) * (D-2)/2. + gammaln(n/2.) k *= np.exp(t) return k else: if np.isfinite(m): return scipy.stats.f.sf(x, dfn, dfd) else: return scipy.stats.chi.sf(x, dfn) def polyF(dim, dfd=np.inf, dfn=1): r""" Return the polynomial part of the EC density when evaluating the polynomial on the sqrt(F) scale (or sqrt(chi^2)=chi scale). The polynomial is such that, if dfd=inf, the F EC density in is just:: polyF(dim,dfn=dfn)(sqrt(dfn*x)) * exp(-dfn*x/2) * (2\pi)^{-(dim+1)/2} """ n = float(dfn) m = float(dfd) D = float(dim) p = K(dim=D, dfd=m, dfn=n) c = p.c # Take care of the powers of n (i.e. we want polynomial K evaluated # at */n). for i in range(p.order+1): c[i] /= np.power(n, p.order-i) # Now, turn it into a polynomial of x when evaluated at x**2 C = np.zeros((2*c.shape[0]-1,)) for i in range(c.shape[0]): C[2*i] = c[i] # Multiply by the factor x^(dfn-dim) in front (see Theorem 4.6 of # Worsley (1994), cited above. if dim > dfn: # divide by x^(dim-dfn) C = C[0:(C.shape[0] - (dim-dfn))] else: # multiply by x^(dim-dfn) C = np.hstack([C, np.zeros((dfn-dim,))]) # Fix up constant in front if np.isfinite(m): C *= np.exp(gammaln((m+n-D)/2.) - gammaln(m/2.)) * np.power(m, -(n-D)/2.) else: C *= np.power(2, -(n-D)/2.) C /= np.power(2, (dim-2)/2.) * np.exp(gammaln(n/2.)) C *= np.sqrt(2*np.pi) return np.poly1d(C) def F_alternative(x, dim, dfd=np.inf, dfn=1): """ Another way to compute F EC density as a product of a polynomial and a power of (1+x^2/m). """ n = float(dfn) m = float(dfd) x = np.asarray(x, np.float64) p = polyF(dim=dim, dfd=dfd, dfn=dfn) v = p(np.sqrt(n*x)) if np.isfinite(m): v *= np.power(1 + n*x/m, -(m+n-2.) / 2.) else: v *= np.exp(-n*x/2) v *= np.power(2*np.pi, -(dim+1)/2.) 
    return v


def test_polynomial1():
    # Polynomial parts of Gaussian EC densities are Hermite polynomials.
    for dim in range(1,10):
        q = rft.Gaussian().quasi(dim)
        h = hermitenorm(dim-1)
        assert_almost_equal(q.c, h.c)


def test_polynomial2():
    # EC density of chi^2(1) is 2 * EC density of Gaussian so polynomial part is
    # a factor of 2 as well.
    for dim in range(1,10):
        q = rft.ChiSquared(dfn=1).quasi(dim)
        h = hermitenorm(dim-1)
        assert_almost_equal(q.c, 2*h.c)


# @dec.slow
def test_polynomial3():
    # EC density of F with infinite dfd is the same as chi^2 --
    # polynomials should be the same.
    for dim in range(10):
        for dfn in range(5,10):
            q1 = rft.FStat(dfn=dfn, dfd=np.inf).quasi(dim)
            q2 = rft.ChiSquared(dfn=dfn).quasi(dim)
            assert_almost_equal(q1.c, q2.c)


# @dec.slow
def test_chi1():
    # EC density of F with infinite dfd is the same as chi^2 -- EC should be the
    # same.
    x = np.linspace(0.1,10,100)
    for dim in range(10):
        for dfn in range(5,10):
            c = rft.ChiSquared(dfn=dfn)
            f = rft.FStat(dfn=dfn, dfd=np.inf)
            chi1 = c.density(dfn*x, dim)
            chi2 = f.density(x, dim)
            assert_almost_equal(chi1, chi2)


def test_chi2():
    # Quasi-polynomial part of the chi^2 EC density should
    # be the limiting polyF.
    for dim in range(1,10):
        for dfn in range(5,10):
            c = rft.ChiSquared(dfn=dfn)
            p1 = c.quasi(dim=dim)
            p2 = polyF(dim=dim, dfn=dfn)
            assert_almost_equal(p1.c, p2.c)


def test_chi3():
    # A chi^2(1) field is a squared Gaussian field, so its EC densities at x
    # are twice the Gaussian EC densities at sqrt(x).
    x = np.linspace(0.1,10,100)
    for dim in range(10):
        g = rft.Gaussian()
        c = rft.ChiSquared(dfn=1)
        ec1 = g.density(np.sqrt(x), dim)
        ec2 = c.density(x, dim)
        assert_almost_equal(2*ec1, ec2)


def test_T1():
    # 0-dim EC density should be the tail probability.
    x = np.linspace(0.1,10,100)
    for dfd in [40,50]:
        t = rft.TStat(dfd=dfd)
        assert_almost_equal(t(x), scipy.stats.t.sf(x, dfd))
    t = rft.TStat(dfd=np.inf)
    assert_almost_equal(t(x), scipy.stats.norm.sf(x))


def test_search():
    # Test that the search region works.
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stat = rft.Gaussian(search=search)
    v1 = stat(x)
    v2 = ((5*x + 4*np.sqrt(2*np.pi)) * np.exp(-x**2/2.) / np.power(2*np.pi, 1.5)
          + 3 * scipy.stats.norm.sf(x))
    assert_almost_equal(v1, v2)


# @dec.slow
def test_search1():
    # Test that the search region works.
    # XXX - we are not testing anything
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stats = [rft.Gaussian()]
    for dfn in range(5,10):
        for dfd in [40,50,np.inf]:
            stats.append(rft.FStat(dfn=dfn, dfd=dfd))
            stats.append(rft.TStat(dfd=dfd))
        stats.append(rft.ChiSquared(dfn=dfn))
    for dim in range(7):
        for stat in stats:
            # XXX - v1 appears to be unused
            v1 = stat(x, search=search)
            v2 = 0
            for i in range(search.mu.shape[0]):
                v2 += stat.density(x, i) * search.mu[i]


# @dec.slow
def test_search2():
    # Test that the search region works.
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stats = [rft.Gaussian(search=search)]
    ostats = [rft.Gaussian()]
    for dfn in range(5,10):
        for dfd in [40,50,np.inf]:
            stats.append(rft.FStat(dfn=dfn, dfd=dfd, search=search))
            ostats.append(rft.FStat(dfn=dfn, dfd=dfd))
            stats.append(rft.TStat(dfd=dfd, search=search))
            ostats.append(rft.TStat(dfd=dfd))
        stats.append(rft.ChiSquared(dfn=dfn, search=search))
        ostats.append(rft.ChiSquared(dfn=dfn))
    for i in range(len(stats)):
        stat = stats[i]
        ostat = ostats[i]
        v1 = stat(x)
        v2 = 0
        for j in range(search.mu.shape[0]):
            v2 += ostat.density(x, j) * search.mu[j]
        assert_almost_equal(v1, v2)


def test_search3():
    # In the Gaussian case, test that search and product give same results.
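    # Volumes passed as `product` multiply the search region inside
    # ECcone.__call__, so for a Gaussian field the two constructions should
    # agree.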
search = rft.IntrinsicVolumes([3,4,5,7]) g1 = rft.Gaussian(search=search) g2 = rft.Gaussian(product=search) x = np.linspace(0.1,10,100) y1 = g1(x) y2 = g2(x) assert_almost_equal(y1, y2) def test_search4(): # Test that the search/product work well together search = rft.IntrinsicVolumes([3,4,5]) product = rft.IntrinsicVolumes([1,2]) x = np.linspace(0.1,10,100) g1 = rft.Gaussian() g2 = rft.Gaussian(product=product) y = g2(x, search=search) z = g1(x, search=search*product) assert_almost_equal(y, z) def test_search5(): # Test that the search/product work well together search = rft.IntrinsicVolumes([3,4,5]) product = rft.IntrinsicVolumes([1,2]) prodsearch = product * search x = np.linspace(0,5,101) g1 = rft.Gaussian() g2 = rft.Gaussian(product=product) z = 0 for i in range(prodsearch.mu.shape[0]): z += g1.density(x, i) * prodsearch.mu[i] y = g2(x, search=search) assert_almost_equal(y, z) # @dec.slow def test_T2(): # T**2 is an F with dfn=1 x = np.linspace(0,5,101) for dfd in [40,50,np.inf]: t = rft.TStat(dfd=dfd) f = rft.FStat(dfd=dfd, dfn=1) for dim in range(7): y = 2*t.density(x, dim) z = f.density(x**2, dim) assert_almost_equal(y, z) # @dec.slow def test_hotelling1(): # Asymptotically, Hotelling is the same as F which is the same as chi^2. x = np.linspace(0.1,10,100) for dim in range(7): for dfn in range(5,10): h = rft.Hotelling(k=dfn).density(x*dfn, dim) f = rft.FStat(dfn=dfn).density(x, dim) assert_almost_equal(h, f) # @dec.slow def test_hotelling4(): # Hotelling T^2 should just be like taking product with sphere. x = np.linspace(0.1,10,100) for dim in range(7): search = rft.IntrinsicVolumes([0]*(dim) + [1]) for k in range(5, 10): p = rft.spherical_search(k) for dfd in [np.inf,40,50]: f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search) t = 2*rft.TStat(dfd=dfd)(np.sqrt(x), search=p*search) h2 = 2*rft.Hotelling(k=k, dfd=dfd).density(x, dim) h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search) assert_almost_equal(h, t) assert_almost_equal(h, f) assert_almost_equal(h, h2) search = rft.IntrinsicVolumes([3,4,5]) for k in range(5, 10): p = rft.spherical_search(k) for dfd in [np.inf,40,50]: f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search) h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search) h2 = 0 for i in range(search.mu.shape[0]): h2 += 2*rft.Hotelling(k=k, dfd=dfd).density(x, i) * search.mu[i] assert_almost_equal(h, f) assert_almost_equal(h, h2) def test_hotelling2(): # Marginally, Hotelling's T^2(k) with m degrees of freedom # in the denominator satisfies # (m-k+1)/(mk) T^2 \sim F_{k,m-k+1}. x = np.linspace(0.1,10,100) for dfn in range(6, 10): h = rft.Hotelling(k=dfn)(x) chi = rft.ChiSquared(dfn=dfn)(x) assert_almost_equal(h, chi) chi2 = scipy.stats.chi2.sf(x, dfn) assert_almost_equal(h, chi2) # XXX - p appears to be unused p = rft.spherical_search(dfn) for dfd in [40,50]: fac = (dfd-dfn+1.)/(dfd*dfn) h = rft.Hotelling(dfd=dfd,k=dfn)(x) f = scipy.stats.f.sf(x*fac, dfn, dfd-dfn+1) f2 = rft.FStat(dfd=dfd-dfn+1,dfn=dfn)(x*fac) assert_almost_equal(f2, f) assert_almost_equal(h, f) # @dec.slow def test_roy1(): # EC densities of Roy with dfn=1 should be twice EC densities of Hotelling # T^2's. 
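    # With dfn=1 the cone is a 0-sphere (the two signs +/-), and
    # spherical_search(1) has mu = [2], doubling each EC density relative to
    # Hotelling's T^2.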
x = np.linspace(0.1,10,100) for dfd in [40,50,np.inf]: for k in [1,4,6]: for dim in range(7): h = 2*rft.Hotelling(dfd=dfd,k=k).density(x, dim) r = rft.Roy(dfd=dfd,k=k,dfn=1).density(x, dim) assert_almost_equal(h, r) # @dec.slow def test_onesidedF(): # EC densities of one sided F should be a difference of # F EC densities x = np.linspace(0.1,10,100) for dfd in [40,50,np.inf]: for dfn in range(2,10): for dim in range(7): f1 = rft.FStat(dfd=dfd,dfn=dfn).density(x, dim) f2 = rft.FStat(dfd=dfd,dfn=dfn-1).density(x, dim) onesided = rft.OneSidedF(dfd=dfd,dfn=dfn).density(x, dim) assert_almost_equal(onesided, 0.5*(f1-f2)) # @dec.slow def test_multivariate_forms(): # MVform with one sphere is sqrt(chi^2), two spheres is sqrt(Roy) with # infinite degrees of freedom. x = np.linspace(0.1,10,100) for k1 in range(5,10): m = rft.MultilinearForm(k1) c = rft.ChiSquared(k1) for dim in range(7): mx = m.density(x, dim) cx = c.density(x**2, dim) assert_almost_equal(mx, cx) for k2 in range(5,10): m = rft.MultilinearForm(k1,k2) r = rft.Roy(k=k1, dfn=k2, dfd=np.inf) for dim in range(7): mx = 2*m.density(x, dim) rx = r.density(x**2/k2, dim) assert_almost_equal(mx, rx) def test_scale(): # Smoke test? a = rft.IntrinsicVolumes([2,3,4]) b = rft.scale_space(a, [3,4], kappa=0.5) def test_F1(): x = np.linspace(0.1,10,100) for dim in range(1,10): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = F(x, dim, dfn=dfn, dfd=dfd) f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd) assert_almost_equal(f1, f2) # @dec.slow def test_F2(): x = np.linspace(0.1,10,100) for dim in range(3,7): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim) f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd) assert_almost_equal(f1, f2) # @dec.slow def test_F3(): x = np.linspace(0.1,10,100) for dim in range(3,7): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim) f2 = F(x, dim, dfn=dfn, dfd=dfd) assert_almost_equal(f1, f2) nipy-0.6.1/nipy/algorithms/statistics/tests/test_utils.py000066400000000000000000000046541470056100100237350ustar00rootroot00000000000000 import numpy as np import pytest import scipy.linalg as spl from numpy.testing import ( assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from scipy.stats import norm from nipy.utils import SCTYPES from ..utils import check_cast_bin8, multiple_fast_inv, multiple_mahalanobis, z_score def test_z_score(): p = np.random.rand(10) z = z_score(p) assert_array_almost_equal(norm.sf(z), p) def test_mahalanobis(): x = np.random.rand(100) / 100 A = np.random.rand(100, 100) / 100 A = np.dot(A.transpose(), A) + np.eye(100) mah = np.dot(x, np.dot(np.linalg.inv(A), x)) assert_almost_equal(mah, multiple_mahalanobis(x, A), decimal=1) def test_mahalanobis2(): x = np.random.randn(100, 3) Aa = np.zeros([100, 100, 3]) for i in range(3): A = np.random.randn(120, 100) A = np.dot(A.T, A) Aa[:, :, i] = A i = np.random.randint(3) mah = np.dot(x[:, i], np.dot(np.linalg.inv(Aa[:, :, i]), x[:, i])) f_mah = (multiple_mahalanobis(x, Aa))[i] assert_almost_equal(mah, f_mah) def test_multiple_fast_inv(): shape = (10, 20, 20) X = np.random.randn(*shape) X_inv_ref = np.zeros(shape) for i in range(shape[0]): X[i] = np.dot(X[i], X[i].T) X_inv_ref[i] = spl.inv(X[i]) X_inv = multiple_fast_inv(X) assert_array_almost_equal(X_inv_ref, X_inv) def assert_equal_bin8(actual, expected): res = check_cast_bin8(actual) assert res.shape == actual.shape assert res.dtype.type == np.uint8 assert_array_equal(res, expected) def 
test_check_cast_bin8(): # Function to return np.uint8 array with check whether array is binary. for in_dtype in SCTYPES['int'] + SCTYPES['uint']: assert_equal_bin8(np.array([0, 1, 1, 1], in_dtype), [0, 1, 1, 1]) assert_equal_bin8(np.array([[0, 1], [1, 1]], in_dtype), [[0, 1], [1, 1]]) pytest.raises(ValueError, check_cast_bin8, np.array([0, 1, 2], dtype=in_dtype)) for in_dtype in SCTYPES['float']: assert_equal_bin8(np.array([0, 1, 1, -0], np.float64), [0, 1, 1, 0]) assert_equal_bin8(np.array([[0, 1], [1, -0]], np.float64), [[0, 1], [1, 0]]) pytest.raises(ValueError, check_cast_bin8, np.array([0, 0.1, 1], dtype=in_dtype)) pytest.raises(ValueError, check_cast_bin8, np.array([0, -1, 1], dtype=in_dtype)) nipy-0.6.1/nipy/algorithms/statistics/utils.py000066400000000000000000000333251470056100100215310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from itertools import combinations import numpy as np from scipy.stats import norm TINY = 1e-16 def z_score(pvalue): """ Return the z-score corresponding to a given p-value. """ pvalue = np.minimum(np.maximum(pvalue, 1.e-300), 1. - TINY) z = norm.isf(pvalue) return z def multiple_fast_inv(a): """ Compute the inverse of a set of arrays in-place Parameters ---------- a: array_like of shape (n_samples, M, M) Set of square matrices to be inverted. `a` is changed in place. Returns ------- a: ndarray shape (n_samples, M, M) The input array `a`, overwritten with the inverses of the original 2D arrays in ``a[0], a[1], ...``. Thus ``a[0]`` replaced with ``inv(a[0])`` etc. Raises ------ LinAlgError : If `a` is singular. ValueError : If `a` is not square, or not 2-dimensional. Notes ----- This function is copied from scipy.linalg.inv, but with some customizations for speed-up from operating on multiple arrays. It also has some conditionals to work with different scipy versions. """ # Consider errors for sparse, masked, object arrays, as for # _asarray_validated? from scipy.linalg.lapack import get_lapack_funcs S, M, N = a.shape if M != N: raise ValueError('a must have shape(n_samples, M, M)') a = np.asarray_chkfinite(a) getrf, getri = get_lapack_funcs(('getrf','getri'), (a[0],)) # Calculate lwork on different scipy versions try: getri_lwork, = get_lapack_funcs(('getri_lwork',), (a[0],)) except (ValueError, AttributeError): # scipy < 0.15 # scipy 0.10, 0.11 -> AttributeError # scipy 0.12, 0.13, 0.14 -> ValueError from scipy.linalg import calc_lwork lwork = calc_lwork.getri(getri.prefix, M)[1] else: # scipies >= 0.15 have getri_lwork function lwork, info = getri_lwork(M) if info != 0: raise ValueError('internal getri work space query failed: %d' % (info,)) lwork = int(lwork.real) # XXX: the following line fixes curious SEGFAULT when # benchmarking 500x500 matrix inverse. This seems to # be a bug in LAPACK ?getri routine because if lwork is # minimal (when using lwork[0] instead of lwork[1]) then # all tests pass. Further investigation is required if # more such SEGFAULTs occur. 
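        # Requesting slightly more than the reported optimal workspace avoids
        # the problem.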
lwork = int(1.01 * lwork) for i, ai in enumerate(a): lu, piv, info = getrf(ai, overwrite_a=True) if info == 0: a[i], info = getri(lu, piv, lwork=lwork, overwrite_lu=1) if info > 0: raise np.linalg.LinAlgError("singular matrix") if info < 0: raise ValueError('illegal value in %d-th argument of internal ' 'getrf|getri' % -info) return a def multiple_mahalanobis(effect, covariance): """Returns the squared Mahalanobis distance for a given set of samples Parameters ---------- effect: array of shape (n_features, n_samples), Each column represents a vector to be evaluated covariance: array of shape (n_features, n_features, n_samples), Corresponding covariance models stacked along the last axis Returns ------- sqd: array of shape (n_samples,) the squared distances (one per sample) """ # check size if effect.ndim == 1: effect = effect[:, np.newaxis] if covariance.ndim == 2: covariance = covariance[:, :, np.newaxis] if effect.shape[0] != covariance.shape[0]: raise ValueError('Inconsistant shape for effect and covariance') if covariance.shape[0] != covariance.shape[1]: raise ValueError('Inconsistant shape for covariance') # transpose and make contuguous for the sake of speed Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T) # compute the inverse of the covariances Kt = multiple_fast_inv(Kt) # derive the squared Mahalanobis distances sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1) return sqd def complex(maximal=[(0, 3, 2, 7), (0, 6, 2, 7), (0, 7, 5, 4), (0, 7, 5, 1), (0, 7, 4, 6), (0, 3, 1, 7)]): """ Faces from simplices Take a list of maximal simplices (by default a triangulation of a cube into 6 tetrahedra) and computes all faces Parameters ---------- maximal : sequence of sequences, optional Default is triangulation of cube into tetrahedra Returns ------- faces : dict """ faces = {} l = [len(list(x)) for x in maximal] for i in range(np.max(l)): faces[i+1] = set() for simplex in maximal: simplex = list(simplex) simplex.sort() for k in range(1,len(simplex)+1): for v in combinations(simplex, k): if len(v) == 1: v = v[0] faces[k].add(v) return faces def cube_with_strides_center(center=[0,0,0], strides=[4, 2, 1]): """ Cube in an array of voxels with a given center and strides. This triangulates a cube with vertices [center[i] + 1]. The dimension of the cube is determined by len(center) which should agree with len(center). The allowable dimensions are [1,2,3]. Parameters ---------- center : (d,) sequence of int, optional Default is [0, 0, 0] strides : (d,) sequence of int, optional Default is [4, 2, 1]. These are the strides given by ``np.ones((2,2,2), np.bool_).strides`` Returns ------- complex : dict A dictionary with integer keys representing a simplicial complex. The vertices of the simplicial complex are the indices of the corners of the cube in a 'flattened' array with specified strides. 
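    Examples
    --------
    For the default center and strides this should give the 8 vertices of
    the unit cube and the 6 tetrahedra of the standard triangulation:

    >>> c = cube_with_strides_center()
    >>> sorted(c[1])
    [0, 1, 2, 3, 4, 5, 6, 7]
    >>> len(c[4])
    6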
""" d = len(center) if not 0 < d <= 3: raise ValueError('dimensionality must be 0 < d <= 3') if len(strides) != d: raise ValueError('center and strides must have the same length') if d == 3: maximal = [(0, 3, 2, 7), (0, 6, 2, 7), (0, 7, 5, 4), (0, 7, 5, 1), (0, 7, 4, 6), (0, 3, 1, 7)] vertices = [] for k in range(2): for j in range(2): for i in range(2): vertices.append((center[0]+i)*strides[0] + (center[1]+j)*strides[1] + (center[2]+k)*strides[2]) elif d == 2: maximal = [(0,1,3), (0,2,3)] vertices = [] for j in range(2): for i in range(2): vertices.append((center[0]+i)*strides[0] + (center[1]+j)*strides[1]) elif d == 1: maximal = [(0,1)] vertices = [center[0],center[0]+strides[0]] mm = [] for m in maximal: nm = [vertices[j] for j in m] mm.append(nm) maximal = [tuple(vertices[j] for j in m) for m in maximal] return complex(maximal) def join_complexes(*complexes): """ Join a sequence of simplicial complexes. Returns the union of all the particular faces. """ faces = {} nmax = np.array([len(c) for c in complexes]).max() for i in range(nmax): faces[i+1] = set() for c in complexes: for i in range(nmax): if i+1 in c: faces[i+1] = faces[i+1].union(c[i+1]) return faces def decompose3d(shape, dim=4): """ Return all (dim-1)-dimensional simplices in a triangulation of a cube of a given shape. The vertices in the triangulation are indices in a 'flattened' array of the specified shape. """ # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel unique = {} strides = np.empty(shape, np.bool_).strides union = join_complexes(*[cube_with_strides_center((0,0,-1), strides), cube_with_strides_center((0,-1,0), strides), cube_with_strides_center((0,-1,-1), strides), cube_with_strides_center((-1,0,0), strides), cube_with_strides_center((-1,0,-1), strides), cube_with_strides_center((-1,-1,0), strides), cube_with_strides_center((-1,-1,-1), strides)]) c = cube_with_strides_center((0,0,0), strides) for i in range(4): unique[i+1] = c[i+1].difference(union[i+1]) if dim in unique and dim > 1: d = unique[dim] for i in range(shape[0]-1): for j in range(shape[1]-1): for k in range(shape[2]-1): index = i*strides[0]+j*strides[1]+k*strides[2] for l in d: yield [index+ii for ii in l] # There are now contributions from three two-dimensional faces for _strides, _shape in zip([(strides[0], strides[1]), (strides[0], strides[2]), (strides[1], strides[2])], [(shape[0], shape[1]), (shape[0], shape[2]), (shape[1], shape[2])]): unique = {} union = join_complexes(*[cube_with_strides_center((0,-1), _strides), cube_with_strides_center((-1,0), _strides), cube_with_strides_center((-1,-1), _strides)]) c = cube_with_strides_center((0,0), _strides) for i in range(3): unique[i+1] = c[i+1].difference(union[i+1]) if dim in unique and dim > 1: d = unique[dim] for i in range(_shape[0]-1): for j in range(_shape[1]-1): index = i*_strides[0]+j*_strides[1] for l in d: yield [index+ii for ii in l] # Finally the one-dimensional faces for _stride, _shape in zip(strides, shape): unique = {} union = cube_with_strides_center((-1,), [_stride]) c = cube_with_strides_center((0,), [_stride]) for i in range(2): unique[i+1] = c[i+1].difference(union[i+1]) if dim in unique and dim > 1: d = unique[dim] for i in range(_shape-1): index = i*_stride for l in d: yield [index+ii for ii in l] if dim == 1: for i in range(np.prod(shape)): yield i def decompose2d(shape, dim=3): """ Return all (dim-1)-dimensional simplices in a triangulation of a square of a given shape. 
The vertices in the triangulation are indices in a 'flattened' array of the specified shape. """ # First do the interior contributions. # We first figure out which vertices, edges, triangles # are uniquely associated with an interior pixel unique = {} strides = np.empty(shape, np.bool_).strides union = join_complexes(*[cube_with_strides_center((0,-1), strides), cube_with_strides_center((-1,0), strides), cube_with_strides_center((-1,-1), strides)]) c = cube_with_strides_center((0,0), strides) for i in range(3): unique[i+1] = c[i+1].difference(union[i+1]) if dim in unique and dim > 1: d = unique[dim] for i in range(shape[0]-1): for j in range(shape[1]-1): index = i*strides[0]+j*strides[1] for l in d: yield [index+ii for ii in l] # Now, the one-dimensional faces for _stride, _shape in zip(strides, shape): unique = {} union = cube_with_strides_center((-1,), [_stride]) c = cube_with_strides_center((0,), [_stride]) for i in range(2): unique[i+1] = c[i+1].difference(union[i+1]) if dim in unique and dim > 1: d = unique[dim] for i in range(_shape-1): index = i*_stride for l in d: yield [index+ii for ii in l] if dim == 1: for i in range(np.prod(shape)): yield i def test_EC3(shape): ts = 0 fs = 0 es = 0 vs = 0 ec = 0 for t in decompose3d(shape, dim=4): ec -= 1; ts += 1 for f in decompose3d(shape, dim=3): ec += 1; fs += 1 for e in decompose3d(shape, dim=2): ec -= 1; es += 1 for v in decompose3d(shape, dim=1): ec += 1; vs += 1 return ts, fs, es, vs, ec # Tell testing framework not to run this as a test test_EC3.__test__ = False def test_EC2(shape): fs = 0 es = 0 vs = 0 ec = 0 for f in decompose2d(shape, dim=3): ec += 1; fs += 1 for e in decompose2d(shape, dim=2): ec -= 1; es += 1 for v in decompose2d(shape, dim=1): ec += 1; vs += 1 return fs, es, vs, ec # Tell testing framework not to run this as a test test_EC2.__test__ = False def check_cast_bin8(arr): """ Return binary array `arr` as uint8 type, or raise if not binary. Parameters ---------- arr : array-like Returns ------- bin8_arr : uint8 array `bin8_arr` has same shape as `arr`, is of dtype ``np.uint8``, with values 0 and 1 only. Raises ------ ValueError When the array is not binary. Specifically, raise if, for any element ``e``, ``e != (e != 0)``. 
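    Examples
    --------
    A minimal sketch of the intended use (``np`` is numpy, as imported at
    the top of this module):

    >>> check_cast_bin8(np.array([0, 1, 1]))
    array([0, 1, 1], dtype=uint8)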
""" if np.any(arr != (arr !=0)): raise ValueError('input array should only contain values 0 and 1') return arr.astype(np.uint8) nipy-0.6.1/nipy/algorithms/tests/000077500000000000000000000000001470056100100167615ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/tests/__init__.py000066400000000000000000000000001470056100100210600ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/tests/test_interpolator.py000066400000000000000000000061651470056100100231240ustar00rootroot00000000000000""" Testing interpolation module """ from itertools import product import numpy as np import pytest from numpy.testing import assert_almost_equal, assert_array_equal from scipy.ndimage import map_coordinates from nipy.core.api import Image, vox2mni from ..interpolation import ImageInterpolator def test_interp_obj(): arr = np.arange(24).reshape((2, 3, 4)) coordmap = vox2mni(np.eye(4)) img = Image(arr, coordmap) interp = ImageInterpolator(img) assert interp.mode == 'constant' assert interp.order == 3 # order is read-only pytest.raises(AttributeError, setattr, interp, 'order', 1) interp = ImageInterpolator(img, mode='nearest') assert interp.mode == 'nearest' # mode is read-only pytest.raises(AttributeError, setattr, interp, 'mode', 'reflect') def test_interpolator(): shape = (2, 3, 4) arr = np.arange(24).reshape(shape) coordmap = vox2mni(np.eye(4)) img = Image(arr, coordmap) ixs = np.indices(arr.shape).astype(float) for order in range(5): interp = ImageInterpolator(img, mode='nearest', order=order) # Interpolate at existing points. assert_almost_equal(interp.evaluate(ixs), arr) # Interpolate at half voxel shift ixs_x_shift = ixs.copy() # Interpolate inside and outside at knots ixs_x_shift[0] += 1 res = interp.evaluate(ixs_x_shift) assert_almost_equal(res, np.tile(arr[1], (2, 1, 1))) ixs_x_shift[0] -= 2 res = interp.evaluate(ixs_x_shift) assert_almost_equal(res, np.tile(arr[0], (2, 1, 1))) # Interpolate at mid-points inside and outside ixs_x_shift[0] += 0.5 res = interp.evaluate(ixs_x_shift) # Check inside. mid_arr = np.mean(arr, axis=0) if order > 0 else arr[1] assert_almost_equal(res[1], mid_arr) # Interpolate off top right corner with different modes assert_almost_equal(interp.evaluate([0, 0, 4]), arr[0, 0, -1]) interp = ImageInterpolator(img, mode='constant', order=order, cval=0) assert_array_equal(interp.evaluate([0, 0, 4]), 0) interp = ImageInterpolator(img, mode='constant', order=order, cval=1) assert_array_equal(interp.evaluate([0, 0, 4]), 1) # Check against direct ndimage interpolation # Need floating point input array to replicate # our floating point backing store. farr = arr.astype(float) for offset, axis, mode in product(np.linspace(-2, 2, 15), range(3), ('nearest', 'constant')): interp = ImageInterpolator(img, mode=mode, order=order) coords = ixs.copy() slicer = tuple(None if i == axis else 0 for i in range(3)) coords[slicer] = coords[slicer] + offset actual = interp.evaluate(coords) expected = map_coordinates(farr, coords, mode=mode, order=order) assert_almost_equal(actual, expected) del interp nipy-0.6.1/nipy/algorithms/tests/test_kernel_smooth.py000066400000000000000000000114061470056100100232450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test for smoothing with kernels """ import numpy as np import pytest from numpy.random import randint from numpy.testing import assert_array_almost_equal from transforms3d.taitbryan import euler2mat from ... 
import load_image from ...core.api import AffineTransform, Image, compose, drop_io_dim from ...testing import anatfile, funcfile from ..kernel_smooth import LinearFilter, fwhm2sigma, sigma2fwhm def test_anat_smooth(): anat = load_image(anatfile) smoother = LinearFilter(anat.coordmap, anat.shape) sanat = smoother.smooth(anat) assert sanat.shape == anat.shape assert sanat.coordmap == anat.coordmap assert not np.allclose(sanat.get_fdata(), anat.get_fdata()) def test_funny_coordmap(): # 5x4 affine should also work, and give same answer as 4x4 func = load_image(funcfile) cmap = func.coordmap # Give the affine a rotation aff = np.eye(5) aff[:3,:3] = euler2mat(0.3, 0.2, 0.1) cmap_rot = AffineTransform(cmap.function_range, cmap.function_range, aff) func_rot = Image(func.get_fdata(), compose(cmap_rot, cmap)) func1 = func_rot[...,1] # 5x4 affine smoother = LinearFilter(func1.coordmap, func1.shape) sfunc1 = smoother.smooth(func1) # OK # And same as for 4x4 affine cmap3d = drop_io_dim(cmap, 't') func3d = Image(func1.get_fdata(), cmap3d) smoother = LinearFilter(func3d.coordmap, func3d.shape) sfunc3d = smoother.smooth(func3d) assert sfunc1.shape == sfunc3d.shape assert_array_almost_equal(sfunc1.get_fdata(), sfunc3d.get_fdata()) # And same with no rotation func_fresh = func[...,1] # 5x4 affine, no rotation smoother = LinearFilter(func_fresh.coordmap, func_fresh.shape) sfunc_fresh = smoother.smooth(func_fresh) assert sfunc1.shape == sfunc_fresh.shape assert_array_almost_equal(sfunc1.get_fdata(), sfunc_fresh.get_fdata()) def test_func_smooth(): func = load_image(funcfile) smoother = LinearFilter(func.coordmap, func.shape) # should work, but currently broken : sfunc = smoother.smooth(func) pytest.raises(NotImplementedError, smoother.smooth, func) def test_sigma_fwhm(): # ensure that fwhm2sigma and sigma2fwhm are inverses of each other fwhm = np.arange(1.0, 5.0, 0.1) sigma = np.arange(1.0, 5.0, 0.1) assert np.allclose(sigma2fwhm(fwhm2sigma(fwhm)), fwhm) assert np.allclose(fwhm2sigma(sigma2fwhm(sigma)), sigma) def test_kernel(): # Verify that convolution with a delta function gives the correct # answer. tol = 0.9999 sdtol = 1.0e-8 for x in range(6): shape = randint(30, 60 + 1, (3,)) # pos of delta ii, jj, kk = randint(11, 17 + 1, (3,)) # random affine coordmap (diagonal and translations) coordmap = AffineTransform.from_start_step( 'ijk', 'xyz', randint(5, 20 + 1, (3,)) * 0.25, randint(5, 10 + 1, (3,)) * 0.5) # delta function in 3D array signal = np.zeros(shape) signal[ii,jj,kk] = 1. signal = Image(signal, coordmap=coordmap) # A filter with coordmap, shape matched to image kernel = LinearFilter(coordmap, shape, fwhm=randint(50, 100 + 1) / 10.) # smoothed normalized 3D array ssignal = kernel.smooth(signal).get_fdata() ssignal[:] *= kernel.norms[kernel.normalization] # 3 points * signal.size array I = np.indices(ssignal.shape) I.shape = (kernel.coordmap.ndims[0], np.prod(shape)) # location of maximum in smoothed array i, j, k = I[:, np.argmax(ssignal[:].flat)] # same place as we put it before smoothing? 
assert (i,j,k) == (ii,jj,kk) # get physical points position relative to position of delta Z = kernel.coordmap(I.T) - kernel.coordmap([i,j,k]) _k = kernel(Z) _k.shape = ssignal.shape assert np.corrcoef(_k[:].flat, ssignal[:].flat)[0,1] > tol assert (_k[:] - ssignal[:]).std() < sdtol def _indices(i,j,k,axis): I = np.zeros((3,20)) I[0] += i I[1] += j I[2] += k I[axis] += np.arange(-10,10) return I.T vx = ssignal[i,j,(k-10):(k+10)] xformed_ijk = coordmap([i, j, k]) vvx = coordmap(_indices(i,j,k,2)) - xformed_ijk assert np.corrcoef(vx, kernel(vvx))[0,1] > tol vy = ssignal[i,(j-10):(j+10),k] vvy = coordmap(_indices(i,j,k,1)) - xformed_ijk assert np.corrcoef(vy, kernel(vvy))[0,1] > tol vz = ssignal[(i-10):(i+10),j,k] vvz = coordmap(_indices(i,j,k,0)) - xformed_ijk assert np.corrcoef(vz, kernel(vvz))[0,1] > tol nipy-0.6.1/nipy/algorithms/tests/test_resample.py000066400000000000000000000262341470056100100222110ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from itertools import product import numpy as np import pytest from numpy.testing import assert_array_almost_equal from nipy.algorithms.resample import resample, resample_img2img from nipy.core.api import AffineTransform, ArrayCoordMap, Image, vox2mni from nipy.core.reference import slices from nipy.io.api import load_image from nipy.testing import anatfile, funcfile def test_resample_img2img(): fimg = load_image(funcfile) aimg = load_image(anatfile) resimg = resample_img2img(fimg, fimg) assert np.allclose(resimg.get_fdata(), fimg.get_fdata()) pytest.raises(ValueError, resample_img2img, fimg, aimg) # Hackish flag for enabling of pyplots of resamplingstest_2d_from_3d gui_review = False def test_rotate2d(): # Rotate an image in 2d on a square grid, should result in transposed image g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1])) g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1])) i = Image(np.ones((100,100)), g) # This sets the image data by writing into the array i.get_fdata()[50:55,40:55] = 3. a = np.array([[0,1,0], [1,0,0], [0,0,1]], np.float64) ir = resample(i, g2, a, (100, 100)) assert_array_almost_equal(ir.get_fdata().T, i.get_fdata()) def test_rotate2d2(): # Rotate an image in 2d on a non-square grid, should result in transposed # image g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1])) g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1])) i = Image(np.ones((100,80)), g) # This sets the image data by writing into the array i.get_fdata()[50:55,40:55] = 3. a = np.array([[0,1,0], [1,0,0], [0,0,1]], np.float64) ir = resample(i, g2, a, (80,100)) assert_array_almost_equal(ir.get_fdata().T, i.get_fdata()) def test_rotate2d3(): # Another way to rotate/transpose the image, similar to # test_rotate2d2 and test_rotate2d, except the world of the # output coordmap is the same as the world of the # original image. That is, the data is transposed on disk, but the # output coordinates are still 'x,'y' order, not 'y', 'x' order as # above # this functionality may or may not be used a lot. if data is to # be transposed but one wanted to keep the NIFTI order of output # coords this would do the trick g = AffineTransform.from_params('xy', 'ij', np.diag([0.5,0.7,1])) i = Image(np.ones((100,80)), g) # This sets the image data by writing into the array i.get_fdata()[50:55,40:55] = 3. 
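    # `a` below is the identity world->world mapping; the transpose in the
    # final assert comes entirely from the swapped rows of g2's affine.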
a = np.identity(3) g2 = AffineTransform.from_params('xy', 'ij', np.array([[0,0.5,0], [0.7,0,0], [0,0,1]])) ir = resample(i, g2, a, (80,100)) assert_array_almost_equal(ir.get_fdata().T, i.get_fdata()) def test_rotate3d(): # Rotate / transpose a 3d image on a non-square grid g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.6,0.7,1])) g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.7,0.6,1])) shape = (100,90,80) i = Image(np.ones(shape), g) i.get_fdata()[50:55,40:55,30:33] = 3. a = np.array([[1,0,0,0], [0,0,1,0], [0,1,0,0], [0,0,0,1.]]) ir = resample(i, g2, a, (100,80,90)) assert_array_almost_equal(np.transpose(ir.get_fdata(), (0,2,1)), i.get_fdata()) def test_resample2d(): g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_fdata()[50:55,40:55] = 3. # This mapping describes a mapping from the "target" physical # coordinates to the "image" physical coordinates. The 3x3 matrix # below indicates that the "target" physical coordinates are related # to the "image" physical coordinates by a shift of -4 in each # coordinate. Or, to find the "image" physical coordinates, given # the "target" physical coordinates, we add 4 to each "target # coordinate". The resulting resampled image should show the # overall image shifted -8,-8 voxels towards the origin a = np.identity(3) a[:2,-1] = 4. ir = resample(i, i.coordmap, a, (100,90)) assert_array_almost_equal(ir.get_fdata()[42:47,32:47], 3.) def test_resample2d1(): # Tests the same as test_resample2d, only using a callable instead of # an AffineTransform instance g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_fdata()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. A = np.identity(2) b = np.ones(2)*4 def mapper(x): return np.dot(x, A.T) + b ir = resample(i, i.coordmap, mapper, (100,90)) assert_array_almost_equal(ir.get_fdata()[42:47,32:47], 3.) def test_resample2d2(): g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_fdata()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. A = np.identity(2) b = np.ones(2)*4 ir = resample(i, i.coordmap, (A, b), (100,90)) assert_array_almost_equal(ir.get_fdata()[42:47,32:47], 3.) def test_resample2d3(): # Same as test_resample2d, only a different way of specifying # the transform: here it is an (A,b) pair g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_fdata()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. ir = resample(i, i.coordmap, a, (100,90)) assert_array_almost_equal(ir.get_fdata()[42:47,32:47], 3.) def test_resample3d(): g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) shape = (100,90,80) i = Image(np.ones(shape), g) i.get_fdata()[50:55,40:55,30:33] = 3. # This mapping describes a mapping from the "target" physical # coordinates to the "image" physical coordinates. The 4x4 matrix # below indicates that the "target" physical coordinates are related # to the "image" physical coordinates by a shift of -4 in each # coordinate. Or, to find the "image" physical coordinates, given # the "target" physical coordinates, we add 4 to each "target # coordinate". The resulting resampled image should show the # overall image shifted [-6,-8,-10] voxels towards the origin a = np.identity(4) a[:3,-1] = [3,4,5] ir = resample(i, i.coordmap, a, (100,90,80)) assert_array_almost_equal(ir.get_fdata()[44:49,32:47,20:23], 3.) 
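# --- Illustrative sketch (not part of the original test suite) ---
# The comments in test_resample2d and test_resample3d reason that a +4 world
# shift samples the input 8 voxels away when voxels are 0.5 units wide.  A
# minimal numpy-only check of that arithmetic; the helper name is
# hypothetical and the leading underscore keeps it out of test collection.
def _editor_shift_sketch():
    vox2world = np.diag([0.5, 0.5, 1.])  # voxel -> world, as in the 2d tests
    shift = np.identity(3)
    shift[:2, -1] = 4.                   # target world -> image world
    t = np.array([42., 32., 1.])         # homogeneous target voxel coordinate
    # Input voxel sampled for output voxel t: t + 4 / 0.5 = t + 8 per axis
    image_vox = np.linalg.inv(vox2world) @ shift @ vox2world @ t
    assert np.allclose(image_vox[:2], [50., 40.])  # start of the 3.0 block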
def test_resample_outvalue(): # Test resampling with different modes, constant values, datatypes, orders def func(xyz): return xyz + np.asarray([1,0,0]) coordmap = vox2mni(np.eye(4)) arr = np.arange(3 * 3 * 3).reshape(3, 3, 3) aff = np.eye(4) aff[0, 3] = 1. # x translation for mapping, dt, order in product( [aff, func], [np.int8, np.intp, np.int32, np.int64, np.float32, np.float64], [0, 1, 3]): img = Image(arr.astype(dt), coordmap) # Test constant value of 0 img2 = resample(img, coordmap, mapping, img.shape, order=order, mode='constant', cval=0.) exp_arr = np.zeros(arr.shape) exp_arr[:-1, :, :] = arr[1:, :, :] assert_array_almost_equal(img2.get_fdata(), exp_arr) # Test constant value of 1 img2 = resample(img, coordmap, mapping, img.shape, order=order, mode='constant', cval=1.) exp_arr[-1, :, :] = 1 assert_array_almost_equal(img2.get_fdata(), exp_arr) # Test nearest neighbor img2 = resample(img, coordmap, mapping, img.shape, order=order, mode='nearest') exp_arr[-1, :, :] = arr[-1, :, :] assert_array_almost_equal(img2.get_fdata(), exp_arr) # Test img2img target_coordmap = vox2mni(aff) target = Image(arr, target_coordmap) img2 = resample_img2img(img, target, 3, 'nearest') assert_array_almost_equal(img2.get_fdata(), exp_arr) img2 = resample_img2img(img, target, 3, 'constant', cval=1.) exp_arr[-1, :, :] = 1 assert_array_almost_equal(img2.get_fdata(), exp_arr) def test_nonaffine(): # resamples an image along a curve through the image. # # FIXME: use the reference.evaluate.Grid to perform this nicer def curve(x): # function accept N by 1, returns N by 2 return (np.vstack([5*np.sin(x.T),5*np.cos(x.T)]).T + [52,47]) for names in (('xy', 'ij', 't', 'u'),('ij', 'xy', 't', 's')): in_names, out_names, tin_names, tout_names = names g = AffineTransform.from_params(in_names, out_names, np.identity(3)) img = Image(np.ones((100,90)), g) img.get_fdata()[50:55,40:55] = 3. tcoordmap = AffineTransform.from_start_step( tin_names, tout_names, [0], [np.pi*1.8/100]) ir = resample(img, tcoordmap, curve, (100,)) if gui_review: import matplotlib.pyplot as plt plt.figure(num=3) plt.imshow(img, interpolation='nearest') d = curve(np.linspace(0,1.8*np.pi,100)) plt.plot(d[0], d[1]) plt.gca().set_ylim([0,99]) plt.gca().set_xlim([0,89]) plt.figure(num=4) plt.plot(ir.get_fdata()) def test_2d_from_3d(): # Resample a 3d image on a 2d affine grid # This example creates a coordmap that coincides with # the 10th slice of an image, and checks that # resampling agrees with the data in the 10th slice. shape = (100,90,80) g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) i = Image(np.ones(shape), g) i.get_fdata()[50:55,40:55,30:33] = 3. a = np.identity(4) g2 = ArrayCoordMap.from_shape(g, shape)[10] ir = resample(i, g2.coordmap, a, g2.shape) assert_array_almost_equal(ir.get_fdata(), i[10].get_fdata()) def test_slice_from_3d(): # Resample a 3d image, returning a zslice, yslice and xslice # # This example creates a coordmap that coincides with # a given z, y, or x slice of an image, and checks that # resampling agrees with the data in the given slice. 
shape = (100,90,80) g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) img = Image(np.ones(shape), g) img.get_fdata()[50:55,40:55,30:33] = 3 I = np.identity(4) zsl = slices.zslice(26, ((0,49.5), 100), ((0,44.5), 90), img.reference) ir = resample(img, zsl, I, (100, 90)) assert_array_almost_equal(ir.get_fdata(), img[:,:,53].get_fdata()) ysl = slices.yslice(22, ((0,49.5), 100), ((0,39.5), 80), img.reference) ir = resample(img, ysl, I, (100, 80)) assert_array_almost_equal(ir.get_fdata(), img[:,45,:].get_fdata()) xsl = slices.xslice(15.5, ((0,44.5), 90), ((0,39.5), 80), img.reference) ir = resample(img, xsl, I, (90, 80)) assert_array_almost_equal(ir.get_fdata(), img[32,:,:].get_fdata()) nipy-0.6.1/nipy/algorithms/utils/000077500000000000000000000000001470056100100167575ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/utils/__init__.py000066400000000000000000000000001470056100100210560ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/utils/fast_distance.py000066400000000000000000000020661470056100100221440ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ this module contains a function to perform fast distance computation on arrays Author : Bertrand Thirion, 2008-2011 """ import numpy as np def euclidean_distance(X, Y=None): """ Considering the rows of X (and Y=X) as vectors, compute the distance matrix between each pair of vectors Parameters ---------- X, array of shape (n1,p) Y=None, array of shape (n2,p) if Y==None, then Y=X is used instead Returns ------- ED, array of shape(n1, n2) with all the pairwise distance """ if Y is None: Y = X if X.shape[1] != Y.shape[1]: raise ValueError("incompatible dimension for X and Y matrices") n1 = X.shape[0] n2 = Y.shape[0] NX = np.reshape(np.sum(X * X, 1), (n1, 1)) NY = np.reshape(np.sum(Y * Y, 1), (1, n2)) ED = np.repeat(NX, n2, 1) ED += np.repeat(NY, n1, 0) ED -= 2 * np.dot(X, Y.T) ED = np.maximum(ED, 0) ED = np.sqrt(ED) return ED nipy-0.6.1/nipy/algorithms/utils/matrices.py000066400000000000000000000112731470056100100211440ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities for working with matrices """ import numpy as np import scipy.linalg as spl def matrix_rank(M, tol=None): ''' Return rank of matrix using SVD method Rank of the array is the number of SVD singular values of the array that are greater than `tol`. This version of matrix rank is very similar to the numpy.linalg version except for the use of: * scipy.linalg.svd instead of numpy.linalg.svd. * the MATLAB algorithm for default tolerance calculation ``matrix_rank`` appeared in numpy.linalg in December 2009, first available in numpy 1.5.0. Parameters ---------- M : array-like array of <=2 dimensions tol : {None, float} threshold below which SVD values are considered zero. If `tol` is None, and `S` is an array with singular values for `M`, and `eps` is the epsilon value for datatype of `S`, then `tol` set to ``S.max() * eps * max(M.shape)``. Examples -------- >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. 
# rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.zeros((4,4))) # All zeros - zero rank 0 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 >>> matrix_rank([1]) # accepts array-like 1 Notes ----- We check for numerical rank deficiency by using ``tol=max(M.shape) * eps * S[0]`` (where ``S[0]`` is the maximum singular value and thus the 2-norm of the matrix). This is one tolerance threshold for rank deficiency, and the default algorithm used by MATLAB [#2]_. When floating point roundoff is the main concern, then "numerical rank deficiency" is a reasonable choice. In some cases you may prefer other definitions. The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [#1] G. H. Golub and C. F. Van Loan, _Matrix Computations_. Baltimore: Johns Hopkins University Press, 1996. .. [#2] http://www.mathworks.com/help/techdoc/ref/rank.html ''' M = np.asarray(M) if M.ndim > 2: raise TypeError('array should have 2 or fewer dimensions') if M.ndim < 2: return int(not np.all(M==0)) S = spl.svd(M, compute_uv=False) if tol is None: tol = S.max() * np.finfo(S.dtype).eps * max(M.shape) return np.sum(S > tol) def full_rank(X, r=None): """ Return full-rank matrix whose column span is the same as X Uses an SVD decomposition. If the rank of `X` is known it can be specified by `r` -- no check is made to ensure that this really is the rank of X. Parameters ---------- X : array-like 2D array which may not be of full rank. r : None or int Known rank of `X`. r=None results in standard matrix rank calculation. We do not check `r` is really the rank of X; it is to speed up calculations when the rank is already known. Returns ------- fX : array Full-rank matrix with column span matching that of `X` """ if r is None: r = matrix_rank(X) V, D, U = spl.svd(X, full_matrices=0) order = np.argsort(D) order = order[::-1] value = [V[:,order[i]] for i in range(r)] return np.asarray(np.transpose(value)).astype(np.float64) def pos_recipr(X): """ Return element-wise reciprocal of array, setting `X`<=0 to 0 Return the reciprocal of an array, setting all entries less than or equal to 0 to 0. Therefore, it presumes that X should be positive in general. Parameters ---------- X : array-like Returns ------- rX : array array of same shape as `X`, dtype np.float64, with values set to 1/X where X > 0, 0 otherwise """ X = np.asarray(X) out = np.zeros(X.shape) gt_0 = X > 0 out[gt_0] = 1. / X[gt_0] return out def recipr0(X): """ Return element-wise reciprocal of array, `X`==0 -> 0 Return the reciprocal of an array, setting all entries equal to 0 as 0. It does not assume that X should be positive in general. Parameters ---------- X : array-like Returns ------- rX : array """ X = np.asarray(X) out = np.zeros(X.shape) ne_0 = X != 0 out[ne_0] = 1. / X[ne_0] return out nipy-0.6.1/nipy/algorithms/utils/pca.py000066400000000000000000000363171470056100100201060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides a class for principal components analysis (PCA). 
PCA is an orthonormal, linear transform (i.e., a rotation) that maps the data to a new coordinate system such that the maximal variability of the data lies on the first coordinate (or the first principal component), the second greatest variability is projected onto the second coordinate, and so on. The resulting data has unit covariance (i.e., it is decorrelated). This technique can be used to reduce the dimensionality of the data. More specifically, the data is projected onto the eigenvectors of the covariance matrix. """ import numpy as np import numpy.linalg as npl from ...core.image.image import rollimg from ...core.reference.coordinate_map import ( AxisError, drop_io_dim, io_axis_indices, orth_axes, ) from ...utils import SCTYPES def pca(data, axis=0, mask=None, ncomp=None, standardize=True, design_keep=None, design_resid='mean', tol_ratio=0.01): """Compute the SVD PCA of an array-like thing over `axis`. Parameters ---------- data : ndarray-like (float) The array on which to perform PCA over axis `axis` (below) axis : int, optional The axis over which to perform PCA (axis identifying observations). Default is 0 (first) mask : ndarray-like (np.bool_), optional An optional mask, should have shape given by data axes, with `axis` removed, i.e.: ``s = data.shape; s.pop(axis); msk_shape=s`` ncomp : {None, int}, optional How many component basis projections to return. If ncomp is None (the default) then the number of components is given by the calculated rank of the data, after applying `design_keep`, `design_resid` and `tol_ratio` below. We always return all the basis vectors and percent variance for each component; `ncomp` refers only to the number of basis_projections returned. standardize : bool, optional If True, standardize so each time series (after application of `design_keep` and `design_resid`) has the same standard deviation, as calculated by the ``np.std`` function. design_keep : None or ndarray, optional Data is projected onto the column span of design_keep. None (default) equivalent to ``np.identity(data.shape[axis])`` design_resid : str or None or ndarray, optional After projecting onto the column span of design_keep, data is projected perpendicular to the column span of this matrix. If None, we do no such second projection. If a string 'mean', then the mean of the data is removed, equivalent to passing a column vector matrix of 1s. tol_ratio : float, optional If ``XZ`` is the vector of singular values of the projection matrix from `design_keep` and `design_resid`, and S are the singular values of ``XZ``, then `tol_ratio` is the value used to calculate the effective rank of the projection of the design, as in ``rank = ((S / S.max) > tol_ratio).sum()`` Returns ------- results : dict $G$ is the number of non-trivial components found after applying `tol_ratio` to the projections of `design_keep` and `design_resid`. `results` has keys: * ``basis_vectors``: series over `axis`, shape (data.shape[axis], G) - the eigenvectors of the PCA * ``pcnt_var``: percent variance explained by component, shape (G,) * ``basis_projections``: PCA components, with components varying over axis `axis`; thus shape given by: ``s = list(data.shape); s[axis] = ncomp`` * ``axis``: axis over which PCA has been performed. Notes ----- See ``pca_image.m`` from ``fmristat`` for Keith Worsley's code on which some of this is based. 
See: http://en.wikipedia.org/wiki/Principal_component_analysis for some inspiration for naming - particularly 'basis_vectors' and 'basis_projections' Examples -------- >>> arr = np.random.normal(size=(17, 10, 12, 14)) >>> msk = np.all(arr > -2, axis=0) >>> res = pca(arr, mask=msk, ncomp=9) Basis vectors are columns. There is one column for each component. The number of components is the calculated rank of the data matrix after applying the various projections listed in the parameters. In this case we are only removing the mean, so the number of components is one less than the axis over which we do the PCA (here axis=0 by default). >>> res['basis_vectors'].shape (17, 16) Basis projections are arrays with components in the dimension over which we have done the PCA (axis=0 by default). Because we set `ncomp` above, we only retain `ncomp` components. >>> res['basis_projections'].shape (9, 10, 12, 14) """ data = np.asarray(data) # We roll the PCA axis to be first, for convenience if axis is None: raise ValueError('axis cannot be None') data = np.rollaxis(data, axis) if mask is not None: mask = np.asarray(mask) if not data.shape[1:] == mask.shape: raise ValueError('Mask should match dimensions of data other than ' 'the axis over which to do the PCA') if isinstance(design_resid, str) and design_resid == 'mean': # equivalent to: design_resid = np.ones((data.shape[0], 1)) def project_resid(Y): return Y - Y.mean(0)[None,...] elif design_resid is None: def project_resid(Y): return Y else: # matrix passed, we hope projector = np.dot(design_resid, npl.pinv(design_resid)) def project_resid(Y): return Y - np.dot(projector, Y) if standardize: def rmse_scales_func(std_source): # modifies array in place resid = project_resid(std_source) # root mean square of the residual rmse = np.sqrt(np.square(resid).sum(axis=0) / resid.shape[0]) # positive 1/rmse return np.where(rmse<=0, 0, 1. / rmse) else: rmse_scales_func = None """ Perform the computations needed for the PCA. This stores the covariance/correlation matrix of the data in the attribute 'C'. The components are stored as the attributes 'components', for an fMRI image these are the time series explaining the most variance. Now, we compute projection matrices. First, data is projected onto the columnspace of design_keep, then it is projected perpendicular to column space of design_resid. """ if design_keep is None: X = np.eye(data.shape[0]) else: X = np.dot(design_keep, npl.pinv(design_keep)) XZ = project_resid(X) UX, SX, VX = npl.svd(XZ, full_matrices=0) # The matrix UX has orthonormal columns and represents the # final "column space" that the data will be projected onto. rank = (SX/SX.max() > tol_ratio).sum() UX = UX[:,:rank].T # calculate covariance matrix in full-rank column space. 
The returned # array is roughly: YX = dot(UX, data); C = dot(YX, YX.T), perhaps where the # data has been standardized, perhaps summed over slices C_full_rank = _get_covariance(data, UX, rmse_scales_func, mask) # find the eigenvalues D and eigenvectors Vs of the covariance # matrix D, Vs = npl.eigh(C_full_rank) # Compute basis vectors in original column space basis_vectors = np.dot(UX.T, Vs).T # sort both in descending order of eigenvalues order = np.argsort(-D) D = D[order] basis_vectors = basis_vectors[order] pcntvar = D * 100 / D.sum() """ Output the component basis_projections """ if ncomp is None: ncomp = rank subVX = basis_vectors[:ncomp] out = _get_basis_projections(data, subVX, rmse_scales_func) # Roll PCA image axis back to original position in data array if axis < 0: axis += data.ndim out = np.rollaxis(out, 0, axis+1) return {'basis_vectors': basis_vectors.T, 'pcnt_var': pcntvar, 'basis_projections': out, 'axis': axis} def _get_covariance(data, UX, rmse_scales_func, mask): # number of points in PCA dimension rank, n_pts = UX.shape C = np.zeros((rank, rank)) # nan_to_num only for floating point masks if mask is not None: nan_to_num = mask.dtype.type in (SCTYPES['float'] + SCTYPES['complex']) # loop over next dimension to save memory if data.ndim == 2: # If we have 2D data, just do the covariance all in one shot, by using # a slice that is the equivalent of the ':' slice syntax slices = [slice(None)] else: # If we have more then 2D, then we iterate over slices in the second # dimension, in order to save memory slices = [slice(i,i+1) for i in range(data.shape[1])] for s_slice in slices: Y = data[:,s_slice].reshape((n_pts, -1)) # project data into required space YX = np.dot(UX, Y) if rmse_scales_func is not None: YX *= rmse_scales_func(Y) if mask is not None: # weight data with mask. Usually the weights will be 0,1 msk_slice = mask[s_slice].reshape(Y.shape[1]) if nan_to_num: # but if floats, check for NaNs too. msk_slice = np.nan_to_num(msk_slice) YX = YX * msk_slice C += np.dot(YX, YX.T) return C def _get_basis_projections(data, subVX, rmse_scales_func): ncomp = subVX.shape[0] out = np.empty((ncomp,) + data.shape[1:], float) for i in range(data.shape[1]): Y = data[:,i].reshape((data.shape[0], -1)) U = np.dot(subVX, Y) if rmse_scales_func is not None: U *= rmse_scales_func(Y) U.shape = (U.shape[0],) + data.shape[2:] out[:,i] = U return out def pca_image(img, axis='t', mask=None, ncomp=None, standardize=True, design_keep=None, design_resid='mean', tol_ratio=0.01): """ Compute the PCA of an image over a specified axis Parameters ---------- img : Image The image on which to perform PCA over the given `axis` axis : str or int, optional Axis over which to perform PCA. Default is 't'. If `axis` is an integer, gives the index of the input (domain) axis of `img`. If `axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. mask : Image, optional An optional mask, should have shape == image.shape[:3] and the same coordinate map as `img` but with `axis` dropped ncomp : {None, int}, optional How many component basis projections to return. If ncomp is None (the default) then the number of components is given by the calculated rank of the data, after applying `design_keep`, `design_resid` and `tol_ratio` below. We always return all the basis vectors and percent variance for each component; `ncomp` refers only to the number of basis_projections returned. 
standardize : bool, optional If True, standardize so each time series (after application of `design_keep` and `design_resid`) has the same standard deviation, as calculated by the ``np.std`` function. design_keep : None or ndarray, optional Data is projected onto the column span of design_keep. None (default) equivalent to ``np.identity(data.shape[axis])`` design_resid : str or None or ndarray, optional After projecting onto the column span of design_keep, data is projected perpendicular to the column span of this matrix. If None, we do no such second projection. If a string 'mean', then the mean of the data is removed, equivalent to passing a column vector matrix of 1s. tol_ratio : float, optional If ``XZ`` is the vector of singular values of the projection matrix from `design_keep` and `design_resid`, and S are the singular values of ``XZ``, then `tol_ratio` is the value used to calculate the effective rank of the projection of the design, as in ``rank = ((S / S.max) > tol_ratio).sum()`` Returns ------- results : dict $L$ is the number of non-trivial components found after applying `tol_ratio` to the projections of `design_keep` and `design_resid`. `results` has keys: * ``basis_vectors``: series over `axis`, shape (data.shape[axis], L) - the eigenvectors of the PCA * ``pcnt_var``: percent variance explained by component, shape (L,) * ``basis_projections``: PCA components, with components varying over axis `axis`; thus shape given by: ``s = list(data.shape); s[axis] = ncomp`` * ``axis``: axis over which PCA has been performed. Examples -------- >>> from nipy.testing import funcfile >>> from nipy import load_image >>> func_img = load_image(funcfile) Time is the fourth axis >>> func_img.coordmap.function_range CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S', 't'), name='aligned', coord_dtype=float64) >>> func_img.shape (17, 21, 3, 20) Calculate the PCA over time, by default >>> res = pca_image(func_img) >>> res['basis_projections'].coordmap.function_range CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S', 'PCA components'), name='aligned', coord_dtype=float64) The number of components is one less than the number of time points >>> res['basis_projections'].shape (17, 21, 3, 19) """ img_klass = img.__class__ # Which axes are we operating over? 
in_ax, out_ax = io_axis_indices(img.coordmap, axis) if None in (in_ax, out_ax): raise AxisError(f'Cannot identify matching input output axes with "{axis}"') if not orth_axes(in_ax, out_ax, img.coordmap.affine): raise AxisError(f'Input and output axes found from "{axis}" not orthogonal ' 'to rest of affine') # Roll the chosen axis to input position zero work_img = rollimg(img, axis) if mask is not None: if not mask.coordmap.similar_to(drop_io_dim(img.coordmap, axis)): raise ValueError("Mask should have matching coordmap to `img` " f"coordmap with dropped axis {axis}") data = work_img.get_fdata() if mask is not None: mask_data = mask.get_fdata() else: mask_data = None # do the PCA res = pca(data, 0, mask_data, ncomp, standardize, design_keep, design_resid, tol_ratio) # Clean up images after PCA # Rename the axis we dropped, at position 0 after rollimg output_coordmap = work_img.coordmap.renamed_domain( {0: 'PCA components'}) # And the matching output axis - which has not moved position output_coordmap = output_coordmap.renamed_range( {out_ax: 'PCA components'}) output_img = img_klass(res['basis_projections'], output_coordmap) # We have to roll the axis back to the original position output_img = rollimg(output_img, 0, in_ax + 1) key = f'basis_vectors over {axis}' res[key] = res['basis_vectors'] res['basis_projections'] = output_img # Signal the roll in results res['axis'] = in_ax return res nipy-0.6.1/nipy/algorithms/utils/tests/000077500000000000000000000000001470056100100201215ustar00rootroot00000000000000nipy-0.6.1/nipy/algorithms/utils/tests/__init__.py000066400000000000000000000000271470056100100222310ustar00rootroot00000000000000# Make tests a package nipy-0.6.1/nipy/algorithms/utils/tests/test_fast_distance.py000066400000000000000000000015641470056100100243470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the fast distance estimator """ import numpy as np from numpy.testing import assert_almost_equal from ..fast_distance import euclidean_distance as ed def test_euclidean_1(): """ test that the euclidean distance is as expected """ nx, ny = (10, 12) X = np.random.randn(nx, 2) Y = np.random.randn(ny, 2) ED = ed(X, Y) ref = np.zeros((nx, ny)) for i in range(nx): ref[i] = np.sqrt(np.sum((Y - X[i])**2, 1)) assert_almost_equal(ED, ref) def test_euclidean_2(): """ test that the euclidean distance is as expected """ nx = 10 X = np.random.randn(nx, 2) ED = ed(X) ref = np.zeros((nx, nx)) for i in range(nx): ref[i] = np.sqrt(np.sum((X - X[i])**2, 1)) assert_almost_equal(ED, ref) nipy-0.6.1/nipy/algorithms/utils/tests/test_matrices.py000066400000000000000000000050301470056100100233370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for utils.matrices """ import numpy as np import scipy.linalg as spl from numpy.testing import assert_almost_equal, assert_array_almost_equal from ..matrices import full_rank, matrix_rank, pos_recipr, recipr0 def test_matrix_rank(): # Full rank matrix assert 4 == matrix_rank(np.eye(4)) I=np.eye(4); I[-1,-1] = 0. 
# rank deficient matrix assert matrix_rank(I) == 3 # All zeros - zero rank assert matrix_rank(np.zeros((4,4))) == 0 # 1 dimension - rank 1 unless all 0 assert matrix_rank(np.ones((4,))) == 1 assert matrix_rank(np.zeros((4,))) == 0 # accepts array-like assert matrix_rank([1]) == 1 # Make rank deficient matrix rng = np.random.RandomState(20120613) X = rng.normal(size=(40, 10)) X[:, 0] = X[:, 1] + X[:, 2] S = spl.svd(X, compute_uv=False) eps = np.finfo(X.dtype).eps assert matrix_rank(X, tol=0) == 10 assert matrix_rank(X, tol=S.min() - eps) == 10 assert matrix_rank(X, tol=S.min() + eps) == 9 def test_full_rank(): rng = np.random.RandomState(20110831) X = rng.standard_normal((40,5)) # A quick rank check assert matrix_rank(X) == 5 X[:,0] = X[:,1] + X[:,2] assert matrix_rank(X) == 4 Y1 = full_rank(X) assert Y1.shape == (40,4) Y2 = full_rank(X, r=3) assert Y2.shape == (40,3) Y3 = full_rank(X, r=4) assert Y3.shape == (40,4) # Windows - there seems to be some randomness in the SVD result; standardize # column signs before comparison flipper = np.sign(Y1[0]) * np.sign(Y3[0]) assert_almost_equal(Y1, Y3 * flipper) def test_pos_recipr(): X = np.array([2,1,-1,0], dtype=np.int8) eX = np.array([0.5,1,0,0]) Y = pos_recipr(X) assert_array_almost_equal(Y, eX) assert Y.dtype.type == np.float64 X2 = X.reshape((2,2)) Y2 = pos_recipr(X2) assert_array_almost_equal(Y2, eX.reshape((2,2))) # check that lists have arrived XL = [0, 1, -1] assert_array_almost_equal(pos_recipr(XL), [0, 1, 0]) # scalars assert pos_recipr(-1) == 0 assert pos_recipr(0) == 0 assert pos_recipr(2) == 0.5 def test_recipr0(): X = np.array([[2,1],[-4,0]]) Y = recipr0(X) assert_array_almost_equal(Y, np.array([[0.5,1],[-0.25,0]])) # check that lists have arrived XL = [0, 1, -1] assert_array_almost_equal(recipr0(XL), [0, 1, -1]) # scalars assert recipr0(-1) == -1 assert recipr0(0) == 0 assert recipr0(2) == 0.5 nipy-0.6.1/nipy/algorithms/utils/tests/test_pca.py000066400000000000000000000223041470056100100222760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import pytest from nipy.io.api import load_image from nipy.testing import ( assert_almost_equal, assert_array_almost_equal, funcfile, ) from nipy.utils import SCTYPES from ..pca import pca @pytest.fixture def data(): img = load_image(funcfile) arr = img.get_fdata() #arr = np.rollaxis(arr, 3) out = {'nimages': arr.shape[3]} out['fmridata'] = arr frame = out['fmridata'][...,0] out['mask'] = (frame > 500).astype(np.float64) return out def reconstruct(time_series, images, axis=0): # Reconstruct data from remaining components n_tps = time_series.shape[0] images = np.rollaxis(images, axis) ncomps = images.shape[0] img_size = np.prod(images.shape[1:]) rarr = images.reshape((ncomps, img_size)) recond = np.dot(time_series, rarr) recond = recond.reshape((n_tps,) + images.shape[1:]) if axis < 0: axis = axis + images.ndim recond = np.rollaxis(recond, 0, axis+1) return recond def root_mse(arr, axis=0): return np.sqrt(np.square(arr).sum(axis=axis) / arr.shape[axis]) def pos1pca(arr, axis=0, **kwargs): ''' Return basis vectors and projections with first row positive ''' res = pca(arr, axis, **kwargs) return res2pos1(res) def res2pos1(res): # Orient basis vectors in standard direction axis = res['axis'] bvs = res['basis_vectors'] bps = res['basis_projections'] signs = np.sign(bvs[0]) res['basis_vectors'] = bvs * signs new_axes = [None] * bps.ndim n_comps = res['basis_projections'].shape[axis] 
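    # Build an index that broadcasts the per-component sign flips along
    # `axis`, so the projections are re-signed consistently with the
    # re-oriented basis vectors.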
new_axes[axis] = slice(0,n_comps) res['basis_projections'] = bps * signs[tuple(new_axes)] return res def test_same_basis(data): arr4d = data['fmridata'] shp = arr4d.shape arr2d = arr4d.reshape((np.prod(shp[:3]), shp[3])) res = pos1pca(arr2d, axis=-1) p1b_0 = res['basis_vectors'] for i in range(3): res_again = pos1pca(arr2d, axis=-1) assert_almost_equal(res_again['basis_vectors'], p1b_0) def test_2d_eq_4d(data): arr4d = data['fmridata'] shp = arr4d.shape arr2d = arr4d.reshape((np.prod(shp[:3]), shp[3])) arr3d = arr4d.reshape((shp[0], -1, shp[3])) res4d = pos1pca(arr4d, axis=-1, standardize=False) res3d = pos1pca(arr3d, axis=-1, standardize=False) res2d = pos1pca(arr2d, axis=-1, standardize=False) assert_array_almost_equal(res4d['basis_vectors'], res2d['basis_vectors']) assert_array_almost_equal(res4d['basis_vectors'], res3d['basis_vectors']) def test_input_effects(data): # Test effects of axis specifications ntotal = data['nimages'] - 1 # return full rank - mean PCA over last axis p = pos1pca(data['fmridata'], -1) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ntotal,) assert p['pcnt_var'].shape == (ntotal,) # Reconstructed data lacks only mean rarr = reconstruct(p['basis_vectors'], p['basis_projections'], -1) rarr = rarr + data['fmridata'].mean(-1)[...,None] # same effect if over axis 0, which is the default arr = data['fmridata'] arr = np.rollaxis(arr, -1) # Same basis once we've normalized the signs pr = pos1pca(arr) out_arr = np.rollaxis(pr['basis_projections'], 0, 4) assert_almost_equal(out_arr, p['basis_projections']) assert_almost_equal(p['basis_vectors'], pr['basis_vectors']) assert_almost_equal(p['pcnt_var'], pr['pcnt_var']) # Check axis None raises error pytest.raises(ValueError, pca, data['fmridata'], None) def test_diagonality(data): # basis_projections are diagonal, whether standardized or not p = pca(data['fmridata'], -1) # standardized assert diagonal_covariance(p['basis_projections'], -1) pns = pca(data['fmridata'], -1, standardize=False) # not assert diagonal_covariance(pns['basis_projections'], -1) def diagonal_covariance(arr, axis=0): arr = np.rollaxis(arr, axis) arr = arr.reshape(arr.shape[0], -1) aTa = np.dot(arr, arr.T) return np.allclose(aTa, np.diag(np.diag(aTa)), atol=1e-6) def test_2D(): # check that a standard 2D PCA works too M = 100 N = 20 L = M-1 # rank after mean removal data = np.random.uniform(size=(M, N)) p = pca(data) ts = p['basis_vectors'] imgs = p['basis_projections'] assert ts.shape == (M, L) assert imgs.shape == (L, N) rimgs = reconstruct(ts, imgs) # add back the sqrt MSE, because we standardized data_mean = data.mean(0)[None,...] demeaned = data - data_mean rmse = root_mse(demeaned, axis=0)[None,...] # also add back the mean assert_array_almost_equal((rimgs * rmse) + data_mean, data) # if standardize is set, or not, covariance is diagonal assert diagonal_covariance(imgs) p = pca(data, standardize=False) imgs = p['basis_projections'] assert diagonal_covariance(imgs) def test_PCAMask(data): # for 2 and 4D case ntotal = data['nimages'] - 1 ncomp = 5 arr4d = data['fmridata'] mask3d = data['mask'] arr2d = arr4d.reshape((-1, data['nimages'])) mask1d = mask3d.reshape(-1) for arr, mask in (arr4d, mask3d), (arr2d, mask1d): p = pca(arr, -1, mask, ncomp=ncomp) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == mask.shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
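    # (the same checks pass for both the 4D/3D and flattened 2D/1D forms)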
# Any reasonable datatype for mask for dt in ([np.bool_] + SCTYPES['int'] + SCTYPES['uint'] + SCTYPES['float']): p = pca(arr4d, -1, mask3d.astype(dt), ncomp=ncomp) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == mask3d.shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) # Mask data shape must match pytest.raises(ValueError, pca, arr4d, -1, mask1d) def test_PCAMask_nostandardize(data): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, data['mask'], ncomp=ncomp, standardize=False) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_PCANoMask(data): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, ncomp=ncomp) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_PCANoMask_nostandardize(data): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, ncomp=ncomp, standardize=False) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_keep(data): # Data is projected onto k=10 dimensional subspace # then has its mean removed. # Should still have rank 10. k = 10 ncomp = 5 ntotal = k X = np.random.standard_normal((data['nimages'], k)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_keep=X) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_resid(data): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. k = 10 ncomp = 5 ntotal = k X = np.random.standard_normal((data['nimages'], k)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_resid=X) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) # if design_resid is None, we do not remove the mean, and we get # full rank from our data p = pca(data['fmridata'], -1, design_resid=None) rank = p['basis_vectors'].shape[1] assert rank == data['nimages'] rarr = reconstruct(p['basis_vectors'], p['basis_projections'], -1) # add back the sqrt MSE, because we standardized rmse = root_mse(data['fmridata'], axis=-1)[...,None] assert np.allclose(rarr * rmse, data['fmridata']) def test_both(data): k1 = 10 k2 = 8 ncomp = 5 ntotal = k1 X1 = np.random.standard_normal((data['nimages'], k1)) X2 = np.random.standard_normal((data['nimages'], k2)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_resid=X2, design_keep=X1) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == data['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
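# --- Illustrative sketch (not part of the original test suite) ---
# The tests above repeatedly expect ntotal == nimages - 1: removing the mean
# projects the image dimension onto a (nimages - 1)-dimensional subspace, so
# one singular value collapses to ~0.  A minimal numpy-only demonstration of
# that rank argument; the function name is hypothetical and the leading
# underscore keeps it out of test collection.
def _editor_rank_sketch(n_images=20, n_voxels=50, tol_ratio=0.01):
    rng = np.random.RandomState(0)
    Y = rng.normal(size=(n_images, n_voxels))
    Yc = Y - Y.mean(axis=0)                      # remove mean over images
    S = np.linalg.svd(Yc, compute_uv=False)
    rank = int((S / S.max() > tol_ratio).sum())  # same tol_ratio idea as pca()
    assert rank == n_images - 1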
nipy-0.6.1/nipy/algorithms/utils/tests/test_pca_image.py000066400000000000000000000312551470056100100234450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import pytest from nibabel.affines import from_matvec from numpy.testing import assert_almost_equal, assert_array_equal from ....core.api import AffineTransform, Image from ....core.api import CoordinateSystem as CS from ....core.image.image import rollimg from ....core.reference.coordinate_map import AxisError, drop_io_dim from ....core.reference.coordinate_map import product as cm_product from ....io.api import load_image from ....testing import funcfile from ..pca import pca as pca_array from ..pca import pca_image from .test_pca import res2pos1 @pytest.fixture(scope='module') def data_dict(): img = load_image(funcfile) # Here, I'm just doing this so I know that img.shape[0] is the number of # volumes t0_img = rollimg(img, 't') out = {'nimages': t0_img.shape[0]} # Below, I am just making a mask because I already have img, I know I can do # this. In principle, though, the pca function will just take another Image # as a mask img_data = t0_img.get_fdata() mask_cmap = drop_io_dim(img.coordmap, 't') first_frame = img_data[0] mask = Image(np.greater(first_frame, 500).astype(np.float64), mask_cmap) out['fmridata'] = img out['mask'] = mask # print data_dict['mask'].shape, np.sum(data_dict['mask'].get_fdata()) assert out['mask'].shape == (17, 21, 3) assert_almost_equal(np.sum(out['mask'].get_fdata()), 1071.0) return out def _rank(p): return p['basis_vectors'].shape[1] def test_PCAMask(data_dict): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], 't', data_dict['mask'], ncomp=ncomp) assert _rank(p) == ntotal assert p['axis'] == 3 assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_mask_match(data_dict): # we can't do PCA over spatial axes if we use a spatial mask ncomp = 5 out_coords = data_dict['mask'].reference.coord_names for i, o, n in zip('ijk', out_coords, [0,1,2]): pytest.raises(ValueError, pca_image, data_dict['fmridata'], i, data_dict['mask'], ncomp) pytest.raises(ValueError, pca_image, data_dict['fmridata'], o, data_dict['mask'], ncomp) pytest.raises(ValueError, pca_image, data_dict['fmridata'], n, data_dict['mask'], ncomp) def test_PCAMask_nostandardize(data_dict): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], 't', data_dict['mask'], ncomp=ncomp, standardize=False) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
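    # As in the standardized case above: the spatial affine is preserved and
    # only the 't' axis is renamed to 'PCA components' (checked next).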
assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_PCANoMask(data_dict): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], ncomp=ncomp) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_PCANoMask_nostandardize(data_dict): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], ncomp=ncomp, standardize=False) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_keep(data_dict): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. k = 10 ncomp = 5 nimages = data_dict['nimages'] ntotal = k X = np.random.standard_normal((nimages, k)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_keep=X) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_resid(data_dict): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. k = 10 ncomp = 5 nimages = data_dict['nimages'] ntotal = k X = np.random.standard_normal((nimages, k)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_resid=X) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_both(data_dict): k1 = 10 k2 = 8 ncomp = 5 nimages = data_dict['nimages'] ntotal = k1 X1 = np.random.standard_normal((nimages, k1)) X2 = np.random.standard_normal((nimages, k2)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_resid=X2, design_keep=X1) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert (p['basis_projections'].shape == data_dict['mask'].shape + (ncomp,)) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components')) assert_array_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_5d(data_dict): # What happened to a 5d image? We should get 4d images back img = data_dict['fmridata'] data = img.get_fdata() # Make a last input and output axis called 'v' vcs = CS('v') xtra_cmap = AffineTransform(vcs, vcs, np.eye(2)) cmap_5d = cm_product(img.coordmap, xtra_cmap) data_5d = data.reshape(data.shape + (1,)) fived = Image(data_5d, cmap_5d) mask = data_dict['mask'] mask_data = mask.get_fdata() mask_data = mask_data.reshape(mask_data.shape + (1,)) cmap_4d = cm_product(mask.coordmap, xtra_cmap) mask4d = Image(mask_data, cmap_4d) nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(fived, 't', mask4d, ncomp=ncomp) assert _rank(p) == ntotal assert p['basis_vectors over t'].shape == (nimages, ntotal) assert p['basis_projections'].shape == data.shape[:3] + (ncomp, 1) assert p['pcnt_var'].shape == (ntotal,) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert (p['basis_projections'].axes.coord_names == ('i','j','k','PCA components','v')) assert_array_equal(p['basis_projections'].coordmap.affine, fived.coordmap.affine) # flip the PCA dimension to end data_5d = data.reshape(data.shape[:3] + (1, data.shape[3])) # Make the last axis name be 'group'. 't' is not a length 1 dimension we # are going to leave as is gcs = CS(['group']) xtra_cmap = AffineTransform(gcs, gcs, np.eye(2)) cmap_5d = cm_product(img.coordmap, xtra_cmap) fived = Image(data_5d, cmap_5d) # Give the mask a 't' dimension, but no group dimension mask = data_dict['mask'] mask_data = mask.get_fdata() mask_data = mask_data.reshape(mask_data.shape + (1,)) # We need to replicate the time scaling of the image cmap, hence the 2. 
in # the affine xtra_cmap = AffineTransform(CS('t'), CS('t'), np.diag([2., 1])) cmap_4d = cm_product(mask.coordmap, xtra_cmap) mask4d = Image(mask_data, cmap_4d) nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 # We can now show the axis does not have to be time p = pca_image(fived, mask=mask4d, ncomp=ncomp, axis='group') assert p['basis_vectors over group'].shape == (nimages, ntotal) assert (p['basis_projections'].axes.coord_names == ('i','j','k','t','PCA components')) assert (p['basis_projections'].shape == data.shape[:3] + (1, ncomp)) def img_res2pos1(res, bv_key): # Orient basis vectors in standard direction axis = res['axis'] bvs = res[bv_key] bps_img = res['basis_projections'] bps = bps_img.get_fdata() signs = np.sign(bvs[0]) res[bv_key] = bvs * signs new_axes = [None] * bps.ndim n_comps = bps.shape[axis] new_axes[axis] = slice(0, n_comps) res['basis_projections'] = Image(bps * signs[tuple(new_axes)], bps_img.coordmap) return res def test_other_axes(data_dict): # With a diagonal affine, we can do PCA on any axis ncomp = 5 img = data_dict['fmridata'] in_coords = list(img.axes.coord_names) img_data = img.get_fdata() for axis_no, axis_name in enumerate('ijkt'): p = pca_image(img, axis_name, ncomp=ncomp) n = img.shape[axis_no] bv_key = 'basis_vectors over ' + axis_name assert _rank(p) == n - 1 assert p[bv_key].shape == (n, n - 1) # We get the expected data back dp = pca_array(img_data, axis_no, ncomp=ncomp) # We have to make sure the signs are the same; on Windows it seems the # signs can flip even between two runs on the same data pos_p = img_res2pos1(p, bv_key) pos_dp = res2pos1(dp) img_bps = pos_p['basis_projections'] assert_almost_equal(pos_dp['basis_vectors'], pos_p[bv_key]) assert_almost_equal(pos_dp['basis_projections'], img_bps.get_fdata()) # And we've replaced the expected axis exp_coords = in_coords.copy() exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert img_bps.axes.coord_names == tuple(exp_coords) # If the affine is not diagonal, we'll get an error aff = from_matvec(np.arange(16).reshape(4,4)) nd_cmap = AffineTransform(img.axes, img.reference, aff) nd_img = Image(img_data, nd_cmap) for axis_name in 'ijkt': pytest.raises(AxisError, pca_image, nd_img, axis_name) # Only for the non-diagonal parts aff = np.array([[1, 2, 0, 0, 10], [2, 1, 0, 0, 11], [0, 0, 3, 0, 12], [0, 0, 0, 4, 13], [0, 0, 0, 0, 1]]) nd_cmap = AffineTransform(img.axes, img.reference, aff) nd_img = Image(img_data, nd_cmap) for axis_name in 'ij': pytest.raises(AxisError, pca_image, nd_img, axis_name) for axis_name in 'kt': p = pca_image(img, axis_name, ncomp=ncomp) exp_coords = in_coords.copy() exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert p['basis_projections'].axes.coord_names == tuple(exp_coords) nipy-0.6.1/nipy/cli/000077500000000000000000000000001470056100100142155ustar00rootroot00000000000000nipy-0.6.1/nipy/cli/__init__.py000066400000000000000000000000501470056100100163210ustar00rootroot00000000000000""" Logic for command line scripts. """ nipy-0.6.1/nipy/cli/diagnose.py000066400000000000000000000047261470056100100163670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: DESCRIP = 'Calculate and write results for diagnostic screen' EPILOG = \ '''nipy_diagnose will generate a series of diagnostic images for a 4D fMRI image volume. The following images will be generated. <ext> is the input filename extension (e.g. '.nii'): * components_

nipy-0.6.1/nipy/labs/spatial_models/tests/test_bsa.py000066400000000000000000000074061470056100100227160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Tests for bayesian_structural_analysis Author : Bertrand Thirion, 2009 """ #autoindent import numpy as np import scipy.stats as st from nipy.testing import assert_array_equal from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset from ..bayesian_structural_analysis import _stat_to_proba, compute_landmarks from ..discrete_domain import domain_from_binary_array def make_bsa_2d(betas, theta=3., sigma=5., ths=0, thq=0.5, smin=3, algorithm='density'): """ Function for performing bayesian structural analysis on a set of images. """ ref_dim = np.shape(betas[0]) n_subj = betas.shape[0] # get the functional information lbeta = np.array([np.ravel(betas[k]) for k in range(n_subj)]).T # the voxel volume is 1.0 dom = domain_from_binary_array(np.ones(ref_dim)) AF, BF = compute_landmarks(dom, lbeta, sigma, thq, ths, theta, smin, algorithm=algorithm, n_iter=100, burnin=10) return AF, BF # @dec.slow def test_bsa_methods(): # generate the data n_subj = 5 shape = (40, 40) pos = np.array([[12, 14], [20, 20], [30, 35]]) # make a dataset with a nothing feature null_ampli = np.array([0, 0, 0]) null_betas = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=null_ampli, width=5.0, seed=1) #null_betas = np.reshape(null_dataset, (n_subj, shape[0], shape[1])) # make a dataset with a something feature pos_ampli = np.array([5, 7, 6]) pos_betas = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=pos_ampli, width=5.0, seed=2) #pos_betas = np.reshape(pos_dataset, (n_subj, shape[0], shape[1])) # set various parameters theta = float(st.t.isf(0.01, 100)) sigma = 5.
/ 1.5 half_subjs = n_subj / 2 thq = 0.9 smin = 5 # tuple of tuples with each tuple being # (name_of_method, ths_value, data_set, test_function) algs_tests = ( ('density', half_subjs, null_betas, lambda AF, BF: AF.k == 0), ('co-occurrence', half_subjs, null_betas, lambda AF, BF: AF.k == 0), ('density', 1, pos_betas, lambda AF, BF: AF.k > 1)) for name, ths, betas, test_func in algs_tests: # run the algo AF, BF = make_bsa_2d(betas, theta, sigma, ths, thq, smin, algorithm=name) assert test_func(AF, BF) assert AF.map_label().shape == (np.prod(shape),) assert AF.kernel_density().shape == (np.prod(shape),) assert (AF.roi_prevalence() > ths).all() def test_pproba(): test = 5 * np.random.rand(10) order = np.argsort(-test) learn = np.random.rand(100) learn[:20] += 3 # pval = _stat_to_proba(test) # check that pvals are between 0 and 1, and that the mapping is monotonic assert (pval >= 0).all() assert (pval <= 1).all() assert_array_equal(pval[order], np.sort(pval)) # pval = _stat_to_proba(test, learn) assert (pval >= 0).all() assert (pval <= 1).all() assert_array_equal(pval[order], np.sort(pval)) # for method in ['gauss_mixture', 'emp_null', 'gam_gauss']: pval = _stat_to_proba(test, learn, method=method) assert (pval >= 0).all() assert (pval <= 1).all() # assert_array_equal(pval[order], np.sort(pval), 6) nipy-0.6.1/nipy/labs/spatial_models/tests/test_bsa_io.py000066400000000000000000000022111470056100100233720ustar00rootroot00000000000000 from os.path import exists import numpy as np from nibabel import Nifti1Image from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset from ..bsa_io import make_bsa_image def test_parcel_intra_from_3d_images_list(in_tmp_path): """Test that BSA images are written, starting from a list of 3D images """ # Generate an image shape = (5, 5, 5) contrast_id = 'plop' mask_image = Nifti1Image(np.ones(shape), np.eye(4)) #mask_images = [mask_image for _ in range(5)] data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(mask=mask_image, out_image_file=datim) #run the algo landmark, hrois = make_bsa_image( mask_image, data_image, threshold=10., smin=0, sigma=1., prevalence_threshold=0, prevalence_pval=0.5, write_dir=in_tmp_path, algorithm='density', contrast_id=contrast_id) assert landmark is None assert len(hrois) == 5 assert exists(f'density_{contrast_id}.nii') assert exists(f'prevalence_{contrast_id}.nii') assert exists(f'AR_{contrast_id}.nii') assert exists(f'CR_{contrast_id}.nii') nipy-0.6.1/nipy/labs/spatial_models/tests/test_discrete_domain.py000066400000000000000000000170711470056100100253010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities.
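The smatrix_* tests below count the edges of the sparse topology matrix for regular grids in one to five dimensions; the remaining tests build domains from arrays, images, shapes and meshes, and exercise masking, features, representative statistics and integration.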
Caveat: this assumes that the MNI template image is available in ~/.nipy/tests/data """ import nibabel.gifti as nbg import numpy as np from nibabel import Nifti1Image from numpy.testing import assert_almost_equal, assert_array_equal from ..discrete_domain import ( domain_from_binary_array, domain_from_image, domain_from_mesh, grid_domain_from_binary_array, grid_domain_from_image, grid_domain_from_shape, smatrix_from_3d_array, smatrix_from_nd_array, smatrix_from_nd_idx, ) shape = np.array([5, 6, 7, 8, 9]) def generate_dataset(shape): """Generate a dataset with the described shape """ dim = len(shape) idx = np.reshape(np.indices(shape), (dim, -1)).T return idx def test_smatrix_1d(): """Test the 1-d topological domain """ idx = generate_dataset(shape[:1]) sm = smatrix_from_nd_idx(idx, nn=0) assert sm.data.size == 2 * shape[0] - 2 def test_smatrix_2d(): """Test the 2-d topological domain """ idx = generate_dataset(shape[:2]) sm = smatrix_from_nd_idx(idx, nn=0) ne = 2 * (2 * np.prod(shape[:2]) - shape[0] - shape[1]) assert sm.data.size == ne def test_smatrix_3d(): """Test the 3-d topological domain """ idx = generate_dataset(shape[:3]) sm = smatrix_from_nd_idx(idx) ne = 2 * (3 * np.prod(shape[:3]) - shape[0] * shape[1] - shape[0] * shape[2] - shape[1] * shape[2]) assert sm.data.size == ne def test_smatrix_4d(): """Test the 4-d topological domain """ idx = generate_dataset(shape[:4]) sm = smatrix_from_nd_idx(idx) ne = 4 * np.prod(shape[:4]) for d in range(4): ne -= np.prod(shape[:4]) / shape[d] ne *= 2 assert sm.data.size == ne def test_smatrix_5d(): """Test the 5-d topological domain """ idx = generate_dataset(shape) sm = smatrix_from_nd_idx(idx) ne = 5 * np.prod(shape) for d in range(5): ne -= np.prod(shape) / shape[d] ne *= 2 assert sm.data.size == ne def test_smatrix_5d_bis(): """Test the 5-d topological domain """ toto = np.ones(shape) sm = smatrix_from_nd_array(toto) ne = 5 * np.prod(shape) for d in range(5): ne -= np.prod(shape) / shape[d] ne *= 2 assert sm.data.size == ne def test_matrix_from_3d_array(): """Test the topology using the nipy.graph approach """ toto = np.ones(shape[:3]) sm = smatrix_from_3d_array(toto, 6) ne = 3 * np.prod(shape[:3]) for d in range(3): ne -= np.prod(shape[:3]) / shape[d] ne *= 2 print(sm.data, ne) assert (sm.data > 0).sum() == ne def test_array_domain(): """Test the construction of domain based on array """ toto = np.ones(shape) ddom = domain_from_binary_array(toto) assert np.sum(ddom.local_volume) == np.prod(shape) def test_connected_components(): """Test the estimation of connected components """ toto = np.ones(shape) ddom = domain_from_binary_array(toto) assert_array_equal(ddom.connected_components(), np.zeros(ddom.size)) def test_image_domain(): """Test the construction of domain based on image """ toto = np.ones(shape[:3]) affine = np.random.randn(4, 4) affine[3:, 0:3] = 0 nim = Nifti1Image(toto, affine) ddom = domain_from_image(nim) ref = np.sum(toto) * np.absolute(np.linalg.det(affine)) assert_almost_equal(np.sum(ddom.local_volume), ref) def test_image_feature(): """Test the construction of domain based on image and related feature """ mask = np.random.randn(*shape[:3]) > .5 noise = np.random.randn(*shape[:3]) affine = np.eye(4) mim = Nifti1Image(mask.astype('u1'), affine) nim = Nifti1Image(noise, affine) ddom = grid_domain_from_image(mim) ddom.make_feature_from_image(nim, 'noise') assert_almost_equal(ddom.features['noise'], noise[mask]) def test_array_grid_domain(): """Test the construction of grid domain based on array """ toto = np.ones(shape) ddom =
grid_domain_from_binary_array(toto) assert np.sum(ddom.local_volume) == np.prod(shape) def test_image_grid_domain(): """Test the construction of grid domain based on image """ toto = np.ones(shape[:3]) affine = np.random.randn(4, 4) affine[3:, 0:3] = 0 nim = Nifti1Image(toto, affine) ddom = grid_domain_from_image(nim) ref = np.sum(toto) * np.absolute(np.linalg.det(affine[:3, 0:3])) assert_almost_equal(np.sum(ddom.local_volume), ref) def test_shape_grid_domain(): """ """ ddom = grid_domain_from_shape(shape) assert np.sum(ddom.local_volume) == np.prod(shape) def test_feature(): """ test feature inclusion """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) plop = ddom.get_feature('data') assert_almost_equal(plop, np.ravel(toto)) def test_mask_feature(): """ test_feature_masking """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) mdom = ddom.mask(np.ravel(toto > .5)) plop = mdom.get_feature('data') assert_almost_equal(plop, toto[toto > .5]) def test_domain_mask(): """test domain masking """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) mdom = ddom.mask(np.ravel(toto > .5)) assert mdom.size == np.sum(toto > .5) def test_grid_domain_mask(): """test grid domain masking """ toto = np.random.rand(*shape) ddom = grid_domain_from_binary_array(toto) mdom = ddom.mask(np.ravel(toto > .5)) assert mdom.size == np.sum(toto > .5) def test_domain_from_mesh(): """Test domain_from_mesh method """ coords = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.]]) triangles = np.asarray([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]) darrays = [nbg.GiftiDataArray(coords, datatype='NIFTI_TYPE_FLOAT32'), nbg.GiftiDataArray(triangles, datatype='NIFTI_TYPE_INT32')] toy_image = nbg.GiftiImage(darrays=darrays) domain = domain_from_mesh(toy_image) # if we get there, we could build the domain, and that's what we wanted. assert_array_equal(domain.get_coord(), coords) def test_representative(): """ test representative computation """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) dmean = toto.mean() dmin = toto.min() dmax = toto.max() dmed = np.median(toto) assert_almost_equal(ddom.representative_feature('data', 'mean'), dmean) assert_almost_equal(ddom.representative_feature('data', 'min'), dmin) assert_almost_equal(ddom.representative_feature('data', 'max'), dmax) assert_almost_equal(ddom.representative_feature('data', 'median'), dmed) def test_integrate_1d(): """ test integration in 1d """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) assert_almost_equal(ddom.integrate('data'), toto.sum()) def test_integrate_2d(): """test integration in 2d """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ftoto = np.ravel(toto) f2 = np.vstack((ftoto, ftoto)).T ddom.set_feature('data', f2) ts = np.ones(2) * toto.sum() assert_almost_equal(ddom.integrate('data'), ts) nipy-0.6.1/nipy/labs/spatial_models/tests/test_hroi.py000066400000000000000000000136301470056100100231060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities. 
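These tests cover the hierarchical ROI (HROI) machinery built on top of discrete domains: the make_hroi() fixture below defines nine regions whose common parent is region 0, and the tests exercise selection, ascending and descending merges, leaf reduction and feature propagation.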
Caveat: this assumes that the MNI template image is available in ~/.nipy/tests/data In those tests, we often access some ROI directly by a fixed index instead of using the utility functions such as get_id() or select_id(). """ import numpy as np from numpy.testing import assert_array_equal from ..discrete_domain import domain_from_binary_array from ..hroi import HROI_as_discrete_domain_blobs, make_hroi_from_subdomain from ..mroi import subdomain_from_array shape = (5, 6, 7) def make_domain(): """Create a discrete domain covering the full grid """ labels = np.ones(shape) dom = domain_from_binary_array(labels, affine=None) return dom ####################################################################### # Test on hierarchical ROI ####################################################################### def make_hroi(empty=False): """Create a hierarchical ROI instance """ labels = np.zeros(shape) if not empty: labels[4:, 5:, 6:] = 1 labels[:2, 0:2, 0:2] = 2 labels[:2, 5:, 6:] = 3 labels[:2, 0:2, 6:] = 4 labels[4:, 0:2, 6:] = 5 labels[4:, 0:2, 0:2] = 6 labels[4:, 5:, 0:2] = 7 labels[:2, 5:, 0:2] = 8 parents = np.zeros(9) else: labels = -np.ones(shape) parents = np.array([]) sd = subdomain_from_array(labels, affine=None, nn=0) hroi = make_hroi_from_subdomain(sd, parents) return hroi def test_hroi(): """Check the number of regions in the fixture hierarchy """ hroi = make_hroi() assert hroi.k == 9 def test_hroi_isleaf(): """ Test basic construction of a tree of isolated leaves """ hroi = make_hroi() hroi.select_roi([0] + list(range(2, 9))) assert hroi.k == 8 def test_hroi_isleaf_2(): """Test tree pruning, with parent remapping """ hroi = make_hroi() hroi.select_roi(list(range(1, 9))) assert_array_equal(hroi.parents, np.arange(8).astype(np.int_)) def test_asc_merge(): """ Test ascending merge """ hroi = make_hroi() s1 = hroi.get_size(0) + hroi.get_size(1) total_size = np.sum([hroi.get_size(id) for id in hroi.get_id()]) assert hroi.get_size(0, ignore_children=False) == total_size hroi.merge_ascending([1]) assert hroi.get_size(0) == s1 def test_asc_merge_2(): """ Test ascending merge Test that ROIs that are their own parent are unchanged.
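(In the make_hroi() fixture the parent array is all zeros, so region 0 is its own parent; merging it upwards is therefore a no-op and both the region count and the region size must stay the same.)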
""" hroi = make_hroi() s1 = hroi.get_size(0) hroi.merge_ascending([0]) assert hroi.k == 9 assert hroi.get_size(0) == s1 def test_asc_merge_3(): """Test ascending merge """ hroi = make_hroi() hroi.set_roi_feature('labels', np.arange(9)) hroi.set_roi_feature('labels2', np.arange(9)) hroi.merge_ascending([1], pull_features=['labels2']) assert hroi.get_roi_feature('labels', 0) == 0 assert hroi.get_roi_feature('labels2', 0) == 1 def test_asc_merge_4(): """Test ascending merge """ hroi = make_hroi() hroi.set_roi_feature('labels', list(range(9))) hroi.set_roi_feature('labels2', list(range(9))) parents = np.arange(9) - 1 parents[0] = 0 hroi.parents = parents labels3 = [hroi.label[hroi.label == k] for k in range(hroi.k)] hroi.set_feature('labels3', labels3) hroi.merge_ascending([1], pull_features=['labels2']) assert hroi.k == 8 assert hroi.get_roi_feature('labels', 0) == 0 assert hroi.get_roi_feature('labels2', 0) == 1 assert len(hroi.get_feature('labels3')) == hroi.k assert hroi.get_roi_feature('labels2').size == hroi.k def test_desc_merge(): """ Test descending merge """ hroi = make_hroi() parents = np.arange(hroi.k) parents[1] = 0 hroi.parents = parents s1 = hroi.get_size(0) + hroi.get_size(1) hroi.merge_descending() assert hroi.get_size()[0] == s1 def test_desc_merge_2(): """ Test descending merge """ hroi = make_hroi() parents = np.arange(-1, hroi.k - 1) parents[0] = 0 hroi.parents = parents hroi.set_roi_feature('labels', np.arange(hroi.k)) labels2 = [hroi.label[hroi.label == k] for k in range(hroi.k)] hroi.set_feature('labels2', labels2) hroi.merge_descending() assert hroi.k == 1 assert len(hroi.get_feature('labels2')) == hroi.k assert hroi.get_roi_feature('labels').size == hroi.k def test_desc_merge_3(): """ Test descending merge """ hroi = make_hroi() parents = np.minimum(np.arange(1, hroi.k + 1), hroi.k - 1) hroi.parents = parents hroi.merge_descending() assert hroi.k == 1 def test_leaves(): """ Test leaves """ hroi = make_hroi() size = hroi.get_size()[1:].copy() lroi = hroi.copy() lroi.reduce_to_leaves() assert lroi.k == 8 assert_array_equal(lroi.get_size(), size) assert_array_equal(lroi.get_leaves_id(), np.arange(1, 9)) def test_leaves_empty(): """Test the reduce_to_leaves method on an HROI containing no node """ hroi = make_hroi(empty=True) lroi = hroi.reduce_to_leaves() assert lroi.k == 0 def test_hroi_from_domain(): dom = make_domain() data = np.random.rand(*shape) data[:2, 0:2, 0:2] = 2 rdata = np.reshape(data, (data.size, 1)) hroi = HROI_as_discrete_domain_blobs(dom, rdata, threshold=1., smin=0) assert hroi.k == 1 def test_sd_representative(): """Test the computation of representative features """ hroi = make_hroi() hroi.parents = np.arange(9) hroi.parents[2] = 1 data = [[k] * hroi.get_size(k) for k in hroi.get_id()] hroi.set_feature('data', data) sums = hroi.representative_feature('data') for k in hroi.get_id(): assert sums[hroi.select_id(k)] == k sums2 = hroi.representative_feature('data', ignore_children=False) for k in hroi.get_id(): if k != 1: assert sums2[hroi.select_id(k)] == k else: assert sums2[1] == 17. / 9 nipy-0.6.1/nipy/labs/spatial_models/tests/test_mroi.py000066400000000000000000000174551470056100100231240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities. 
Caveat: this assumes that the MNI template image is available in ~/.nipy/tests/data """ from os.path import dirname from os.path import join as pjoin import numpy as np from nibabel import Nifti1Image, load from numpy.testing import assert_almost_equal, assert_array_equal from nipy.io.nibcompat import get_affine from ..discrete_domain import domain_from_binary_array, grid_domain_from_image from ..hroi import HROI_as_discrete_domain_blobs from ..mroi import subdomain_from_array, subdomain_from_balls shape = (5, 6, 7) ########################################################### # SubDomains tests ########################################################### def make_subdomain(): """Create a multiple ROI instance """ labels = np.zeros(shape) labels[4:, 5:, 6:] = 1 labels[:2, 0:2, 0:2] = 2 labels[:2, 5:, 6:] = 3 labels[:2, 0:2, 6:] = 4 labels[4:, 0:2, 6:] = 5 labels[4:, 0:2, 0:2] = 6 labels[4:, 5:, 0:2] = 7 labels[:2, 5:, 0:2] = 8 mroi = subdomain_from_array(labels - 1, affine=None) return mroi def test_subdomain(): """Test basic construction of multiple_roi """ mroi = make_subdomain() assert mroi.k == 8 def test_subdomain2(): """Test mroi.size """ mroi = make_subdomain() assert len(mroi.get_size()) == 8 for k in mroi.get_id(): assert (mroi.get_size(k) == np.sum(mroi.label == mroi.select_id(k))) def test_copy_subdomain(): """Test copying of a multiple_roi instance """ mroi = make_subdomain() foo_feature = [[i] * j for i, j in enumerate(mroi.get_size())] foo_roi_feature = np.arange(mroi.k) mroi.set_feature('a', foo_feature) mroi.set_roi_feature('b', foo_roi_feature) mroi_copy = mroi.copy() # check some properties of mroi assert mroi.k == 8 for k in mroi.get_id(): assert_array_equal(mroi.get_feature('a', k), foo_feature[mroi.select_id(k)]) assert_array_equal(mroi.get_roi_feature('b'), foo_roi_feature) # delete mroi del mroi # check mroi_copy assert mroi_copy.k == 8 for k in mroi_copy.get_id(): assert_array_equal(mroi_copy.get_feature('a', k), foo_feature[mroi_copy.select_id(k)]) assert_array_equal(mroi_copy.get_roi_feature('b'), foo_roi_feature) def test_select_roi(): # Test select_roi method mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) mroi.set_roi_feature('data_mean', list(range(8))) mroi.select_roi([0]) assert(mroi.k == 1) assert mroi.roi_features['id'] == [0] assert mroi.get_roi_feature('data_mean', 0) == 0 mroi.select_roi([]) assert(mroi.k == 0) assert list(mroi.roi_features) == ['id'] assert list(mroi.roi_features['id']) == [] def test_roi_features(): """Test setting an ROI-level feature """ mroi = make_subdomain() dshape = (8, 3) data = np.random.randn(*dshape) mroi.set_roi_feature('data_mean', data) assert mroi.roi_features['data_mean'].shape == dshape def test_subdomain_feature(): """Test the basic construction of features """ mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) assert mroi.features['data'][0] == data[0] def test_sd_integrate(): """Test the integration """ mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) sums = mroi.integrate('data') for k in range(8): assert sums[k] == np.sum(data[k]) def test_sd_integrate2(): """Test the integration """ mroi = make_subdomain() for k in mroi.get_id(): assert mroi.get_volume(k) == mroi.integrate(id=k) volume_from_integration = mroi.integrate() volume_from_feature = mroi.get_volume() for i in
range(mroi.k): assert volume_from_feature[i] == volume_from_integration[i] def test_sd_representative(): """Test the computation of representative features """ mroi = make_subdomain() data = [[k] * mroi.get_size(k) for k in mroi.get_id()] mroi.set_feature('data', data) sums = mroi.representative_feature('data') for k in mroi.get_id(): assert sums[mroi.select_id(k)] == k def test_sd_from_ball(): dom = domain_from_binary_array(np.ones((10, 10))) radii = np.array([2, 2, 2]) positions = np.array([[3, 3], [3, 7], [7, 7]]) subdomain = subdomain_from_balls(dom, positions, radii) assert subdomain.k == 3 assert_array_equal(subdomain.get_size(), np.array([9, 9, 9])) def test_set_feature(): """Test the feature building capability """ mroi = make_subdomain() data = np.random.randn(np.prod(shape)) feature_data = [data[mroi.select_id(k, roi=False)] for k in mroi.get_id()] mroi.set_feature('data', feature_data) get_feature_output = mroi.get_feature('data') assert_array_equal([len(k) for k in mroi.get_feature('data')], mroi.get_size()) for k in mroi.get_id(): assert_array_equal(mroi.get_feature('data', k), data[mroi.select_id(k, roi=False)]) assert_array_equal(get_feature_output[k], data[mroi.select_id(k, roi=False)]) def test_set_feature2(): mroi = make_subdomain() data = np.random.randn(np.prod(shape)) feature_data = [data[mroi.select_id(k, roi=False)] for k in mroi.get_id()] mroi.set_feature('data', feature_data) mroi.set_feature('data', np.asarray([1000]), id=0, override=True) assert mroi.get_feature('data', 0) == [1000] def test_get_coord(): mroi = make_subdomain() for k in mroi.get_id(): assert_array_equal(mroi.get_coord(k), mroi.domain.coord[mroi.select_id(k, roi=False)]) def test_example(): # Test example runs correctly eg_img = pjoin(dirname(__file__), 'some_blobs.nii') nim = load(eg_img) arr = nim.get_fdata() ** 2 > 0 mask_image = Nifti1Image(arr.astype('u1'), get_affine(nim)) domain = grid_domain_from_image(mask_image) data = nim.get_fdata() values = data[data != 0] # parameters threshold = 3.0 # blob-forming threshold smin = 5 # size threshold on blobs # compute the nested roi object nroi = HROI_as_discrete_domain_blobs(domain, values, threshold=threshold, smin=smin) # compute region-level activation averages activation = [values[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) average_activation = nroi.representative_feature('activation') averages = [blob.mean() for blob in nroi.get_feature('activation')] assert_almost_equal(averages, average_activation, 6) # Test repeat assert_array_equal(average_activation, nroi.representative_feature('activation')) # Binary image is default bin_wim = nroi.to_image() bin_vox = bin_wim.get_fdata() assert_array_equal(np.unique(bin_vox), [0, 1]) id_wim = nroi.to_image('id', roi=True, descrip='description') id_vox = id_wim.get_fdata() mask = bin_vox.astype(bool) assert_array_equal(id_vox[~mask], -1) ids = nroi.get_id() assert_array_equal(np.unique(id_vox), [-1] + list(ids)) # Test activation wim = nroi.to_image('activation', roi=True, descrip='description') # Sadly, all cast to int assert_array_equal(np.unique(wim.get_fdata().astype(np.int32)), [-1, 3, 4, 5]) # end blobs or leaves lroi = nroi.copy() lroi.reduce_to_leaves() assert lroi.k == 14 assert len(lroi.get_feature('activation')) == lroi.k nipy-0.6.1/nipy/labs/spatial_models/tests/test_parcel.py000066400000000000000000000121101470056100100234030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set 
ft=python sts=4 ts=4 sw=4 et: import numpy as np from nipy.algorithms.graph.field import field_from_coo_matrix_and_data from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset from ..discrete_domain import grid_domain_from_binary_array from ..hierarchical_parcellation import hparcel from ..parcellation import MultiSubjectParcellation def test_parcel_interface(): """ Simply test parcellation interface """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) tmp = np.array([np.sum(u == k) for k in range(nb_parcel)]) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, u) assert msp.nb_parcel == nb_parcel assert msp.nb_subj == 1 assert (msp.population().ravel() == tmp).all() def test_parcel_interface_multi_subj(): """ test parcellation interface, with multiple subjects """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 nb_subj = 5 v = [] for s in range(nb_subj): data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) v.append(u) v = np.array(v).T tmp = np.array([np.sum(v == k, 0) for k in range(nb_parcel)]) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, v) assert msp.nb_parcel == nb_parcel assert msp.nb_subj == nb_subj assert (msp.population() == tmp).all() def test_parcel_feature(): """ Simply test parcellation feature interface """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 data = np.random.randn(np.prod(shape), 1) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, u) msp.make_feature('data', data) assert msp.get_feature('data').shape == (nb_parcel, 1) # test that the feature is preserved by a copy msp2 = msp.copy() assert (msp.get_feature('data') == msp2.get_feature('data')).all() # test a multi_dimensional feature dim = 4 msp.make_feature('new', np.random.randn(np.prod(shape), 1, dim)) assert msp.get_feature('new').shape == (nb_parcel, 1, dim) def test_parcel_feature_multi_subj(): """ Test parcellation feature interface with multiple subjects """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 nb_subj = 5 v = [] for s in range(nb_subj): data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) v.append(u) v = np.array(v).T msp = MultiSubjectParcellation(domain, u, v) # test a multi_dimensional feature # dimension 1 msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj)) assert msp.get_feature('data').shape == (nb_parcel, nb_subj) #dimension>1 dim = 4 msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj, dim)) assert msp.get_feature('data').shape == (nb_parcel, nb_subj, dim) # msp.features['data'] has been overridden assert list(msp.features.keys()) == ['data'] def test_parcel_hierarchical(): """Test the algorithm for hierarchical parcellation """ # step 1: generate some synthetic data n_subj = 10 shape = (30, 30) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape) # step 2 : prepare all the information for the parcellation nb_parcel = 10 domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3)) ldata = np.reshape(dataset, (n_subj,
np.prod(shape), 1)) # step 3 : run the algorithm Pa = hparcel(domain, ldata, nb_parcel) # step 4: look at the results Label = Pa.individual_labels control = True for s in range(n_subj): control *= (np.unique(Label[:, s]) == np.arange(nb_parcel)).all() assert(control) def test_prfx(): """Test the ability to construct parcel features and random effects models """ # step 1: generate some synthetic data n_subj = 10 shape = (30, 30) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape) # step 2 : prepare all the information for the parcellation nb_parcel = 10 domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3)) ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1)) # step 3 : run the algorithm Pa = hparcel(domain, ldata, nb_parcel) pdata = Pa.make_feature('functional', np.rollaxis(np.array(ldata), 1, 0)) one_sample = np.squeeze(pdata.mean(0) / pdata.std(0)) assert np.shape(one_sample) == (nb_parcel,) assert one_sample.mean() < 1 assert one_sample.mean() > -1 nipy-0.6.1/nipy/labs/spatial_models/tests/test_parcel_io.py000066400000000000000000000075031470056100100241040ustar00rootroot00000000000000from os.path import exists import numpy as np from nibabel import Nifti1Image, save from numpy.testing import assert_array_equal from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset from ..discrete_domain import grid_domain_from_shape from ..hierarchical_parcellation import hparcel from ..parcel_io import ( fixed_parcellation, mask_parcellation, parcellation_based_analysis, ) def test_mask_parcel(): """ Test that mask parcellation performs correctly """ n_parcels = 20 shape = (10, 10, 10) mask_image = Nifti1Image(np.ones(shape).astype('u1'), np.eye(4)) wim = mask_parcellation(mask_image, n_parcels) assert_array_equal(np.unique(wim.get_fdata()), np.arange(n_parcels)) def test_mask_parcel_multi_subj(in_tmp_path): """ Test that mask parcellation performs correctly """ rng = np.random.RandomState(0); n_parcels = 20 shape = (10, 10, 10) n_subjects = 5 mask_images = [] for subject in range(n_subjects): path = f'mask{subject}.nii' arr = rng.rand(*shape) > .1 save(Nifti1Image(arr.astype('u1'), np.eye(4)), path) mask_images.append(path) wim = mask_parcellation(mask_images, n_parcels) assert_array_equal(np.unique(wim.get_fdata()), np.arange(n_parcels)) def test_parcel_intra_from_3d_image(in_tmp_path): """Test that a parcellation is generated, starting from an input 3D image """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. mask_image = Nifti1Image(np.ones(shape).astype('u1'), np.eye(4)) surrogate_3d_dataset(mask=mask_image, out_image_file='image.nii') #run the algo for method in ['ward', 'kmeans', 'gkm']: osp = fixed_parcellation(mask_image, ['image.nii'], n_parcel, nn, method, in_tmp_path, mu) result = f'parcel_{method}.nii' assert exists(result) assert osp.k == n_parcel def test_parcel_intra_from_3d_images_list(in_tmp_path): """Test that a parcellation is generated, starting from a list of 3D images """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. 
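# A gloss on the three knobs set above (an informal reading of how this test
# drives fixed_parcellation, not a documented contract): n_parcel is the
# number of parcels requested, nn selects the grid neighbourhood system for
# the spatial model, and mu weights spatial compactness against signal
# similarity in the clustering.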
method = 'ward' mask_image = Nifti1Image(np.ones(shape).astype('u1'), np.eye(4)) data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(mask=mask_image, out_image_file=datim) #run the algo osp = fixed_parcellation(mask_image, data_image, n_parcel, nn, method, in_tmp_path, mu) assert exists(f'parcel_{method}.nii') assert osp.k == n_parcel def test_parcel_intra_from_4d_image(in_tmp_path): """Test that a parcellation is generated, starting from a 4D image """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. method = 'ward' mask_image = Nifti1Image(np.ones(shape).astype('u1'), np.eye(4)) surrogate_3d_dataset(n_subj=10, mask=mask_image, out_image_file='image.nii') osp = fixed_parcellation(mask_image, ['image.nii'], n_parcel, nn, method, in_tmp_path, mu) assert exists(f'parcel_{method}.nii') assert osp.k == n_parcel def test_parcel_based_analysis(in_tmp_path): # Generate an image shape = (7, 8, 4) n_subj = 5 n_parcel, nn, mu = 10, 6, 1. data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(shape=shape, out_image_file=datim) ldata = np.random.randn(n_subj, np.prod(shape), 1) domain = grid_domain_from_shape(shape) parcels = hparcel(domain, ldata, n_parcel, mu=3.0) prfx = parcellation_based_analysis( parcels, data_image, test_id='one_sample', rfx_path='prfx.nii', condition_id='', swd=in_tmp_path) assert exists('prfx.nii') assert np.abs(prfx).max() < 15 nipy-0.6.1/nipy/labs/statistical_mapping.py000066400000000000000000000356701470056100100210130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats as sp_stats # Use the nibabel image object from nibabel import Nifti1Image as Image from nibabel.affines import apply_affine from ..algorithms.graph.field import field_from_graph_and_data from ..algorithms.graph.graph import wgraph_from_3d_grid from ..algorithms.statistics import empirical_pvalue from ..io.nibcompat import get_affine from .glm import glm from .group.permutation_test import ( permutation_test_onesample, permutation_test_twosample, ) # FIXME: rename permutation_test_onesample class #so that name starts with upper case ############################################################################### # Cluster statistics ############################################################################### def bonferroni(p, n): return np.minimum(1., p * n) def simulated_pvalue(t, simu_t): return 1 - np.searchsorted(simu_t, t) / float(np.size(simu_t)) def cluster_stats(zimg, mask, height_th, height_control='fpr', cluster_th=0, nulls={}): """ Return a list of clusters, each cluster being represented by a dictionary. Clusters are sorted by descending size order. Within each cluster, local maxima are sorted by descending depth order. 
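A typical call (illustrative only) is ``clusters, info = cluster_stats(zimg, mask, 0.001, height_control='fpr', cluster_th=10)``; each returned cluster dict then carries 'size', 'maxima', 'depth', 'zscore', 'pvalue' and 'fdr_pvalue' entries, plus the corrected p-value fields filled in from `nulls`.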
Parameters ---------- zimg: z-score image mask: mask image height_th: cluster forming threshold height_control: string false positive control meaning of cluster forming threshold: 'fpr'|'fdr'|'bonferroni'|'none' cluster_th: cluster size threshold nulls : cluster-level calibration method: None|'rft'|array Notes ----- This works only with three dimensional data """ # Masking if len(mask.shape) > 3: xyz = np.where((mask.get_fdata() > 0).squeeze()) zmap = zimg.get_fdata().squeeze()[xyz] else: xyz = np.where(mask.get_fdata() > 0) zmap = zimg.get_fdata()[xyz] xyz = np.array(xyz).T nvoxels = np.size(xyz, 0) # Thresholding if height_control == 'fpr': zth = sp_stats.norm.isf(height_th) elif height_control == 'fdr': zth = empirical_pvalue.gaussian_fdr_threshold(zmap, height_th) elif height_control == 'bonferroni': zth = sp_stats.norm.isf(height_th / nvoxels) else: ## Brute-force thresholding zth = height_th pth = sp_stats.norm.sf(zth) above_th = zmap > zth if len(np.where(above_th)[0]) == 0: return None, None ## FIXME zmap_th = zmap[above_th] xyz_th = xyz[above_th] # Clustering ## Extract local maxima and connected components above some threshold ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18), zmap_th) maxima, depth = ff.get_local_maxima(th=zth) labels = ff.cc() ## Make list of clusters, each cluster being a dictionary clusters = [] for k in range(labels.max() + 1): s = np.sum(labels == k) if s >= cluster_th: in_cluster = labels[maxima] == k m = maxima[in_cluster] d = depth[in_cluster] sort_order = d.argsort()[::-1] clusters.append({'size': s, 'maxima': m[sort_order], 'depth': d[sort_order]}) ## Sort clusters by descending size order clusters.sort(key=lambda c: c['size'], reverse=True) # FDR-corrected p-values fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th] # Default "nulls" if 'zmax' not in nulls: nulls['zmax'] = 'bonferroni' if 'smax' not in nulls: nulls['smax'] = None if 's' not in nulls: nulls['s'] = None # Report significance levels in each cluster for c in clusters: maxima = c['maxima'] zscore = zmap_th[maxima] pval = sp_stats.norm.sf(zscore) # Replace array indices with real coordinates c['maxima'] = apply_affine(get_affine(zimg), xyz_th[maxima]) c['zscore'] = zscore c['pvalue'] = pval c['fdr_pvalue'] = fdr_pvalue[maxima] # Voxel-level corrected p-values p = None if isinstance(nulls['zmax'], np.ndarray): p = simulated_pvalue(zscore, nulls['zmax']) elif nulls['zmax'] == 'bonferroni': p = bonferroni(pval, nvoxels) c['fwer_pvalue'] = p # Cluster-level p-values (corrected) p = None if isinstance(nulls['smax'], np.ndarray): p = simulated_pvalue(c['size'], nulls['smax']) c['cluster_fwer_pvalue'] = p # Cluster-level p-values (uncorrected) p = None if isinstance(nulls['s'], np.ndarray): p = simulated_pvalue(c['size'], nulls['s']) c['cluster_pvalue'] = p # General info info = {'nvoxels': nvoxels, 'threshold_z': zth, 'threshold_p': pth, 'threshold_pcorr': bonferroni(pth, nvoxels)} return clusters, info ############################################################################### # Peak_extraction ############################################################################### def get_3d_peaks(image, mask=None, threshold=0., nn=18, order_th=0): """ Return all the peaks of image that are within the mask and above the provided threshold Parameters ---------- image, (3d) test image mask=None, (3d) mask image By default no masking is performed threshold=0., float, threshold value above which peaks are considered nn=18, int, number of neighbours of the topological spatial model order_th=0,
int, threshold on topological order to validate the peaks Returns ------- peaks, a list of dictionaries, where each dict has the fields: val, map value at the peak order, topological order of the peak ijk, array of shape (1,3) grid coordinate of the peak pos, array of shape (n_maxima,3) mm coordinates (mapped by affine) of the peaks """ # Masking if mask is not None: bmask = mask.get_fdata().ravel() data = image.get_fdata().ravel()[bmask > 0] xyz = np.array(np.where(bmask > 0)).T else: shape = image.shape data = image.get_fdata().ravel() xyz = np.reshape(np.indices(shape), (3, np.prod(shape))).T affine = get_affine(image) if not (data > threshold).any(): return None # Extract local maxima and connected components above some threshold, # building the topological model with the requested neighbourhood system ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz, k=nn), data) maxima, order = ff.get_local_maxima(th=threshold) # retain only the maxima with topological order above the given threshold maxima = maxima[order > order_th] order = order[order > order_th] n_maxima = len(maxima) if n_maxima == 0: # should not occur ? return None # reorder the maxima to have decreasing peak value vals = data[maxima] idx = np.argsort(- vals) maxima = maxima[idx] order = order[idx] vals = data[maxima] ijk = xyz[maxima] pos = np.dot(np.hstack((ijk, np.ones((n_maxima, 1)))), affine.T)[:, :3] peaks = [{'val': vals[k], 'order': order[k], 'ijk': ijk[k], 'pos': pos[k]} for k in range(n_maxima)] return peaks ############################################################################### # Statistical tests ############################################################################### def prepare_arrays(data_images, vardata_images, mask_images): from .mask import intersect_masks # Compute mask intersection mask = intersect_masks(mask_images, threshold=1.) # Compute xyz coordinates from mask xyz = np.array(np.where(mask > 0)) # Prepare data & vardata arrays data = np.array([(d.get_fdata()[xyz[0], xyz[1], xyz[2]]).squeeze() for d in data_images]).squeeze() if vardata_images is None: vardata = None else: vardata = np.array([(d.get_fdata()[xyz[0], xyz[1], xyz[2]]).squeeze() for d in vardata_images]).squeeze() return data, vardata, xyz, mask def onesample_test(data_images, vardata_images, mask_images, stat_id, permutations=0, cluster_forming_th=0.01): """ Helper function for permutation-based mass univariate onesample group analysis.
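Returns ``(zimg, maskimg)`` when ``permutations <= 0``, and otherwise ``(zimg, maskimg, nulls)``, where ``nulls`` maps 'zmax', 's' and 'smax' to permutation null distributions.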
""" # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images, mask_images) # Create one-sample permutation test instance ptest = permutation_test_onesample(data, xyz, vardata=vardata, stat_id=stat_id) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[tuple(xyz)] = ptest.zscore() zimg = Image(zmap, get_affine(data_images[0])) # Compute mask image maskimg = Image(mask.astype(np.int8), get_affine(data_images[0])) # Multiple comparisons if permutations <= 0: return zimg, maskimg else: # Cluster definition: (threshold, diameter) cluster_def = (ptest.height_threshold(cluster_forming_th), None) # Calibration voxel_res, cluster_res, region_res = \ ptest.calibrate(nperms=permutations, clusters=[cluster_def]) nulls = {} nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values']) nulls['s'] = cluster_res[0]['perm_size_values'] nulls['smax'] = cluster_res[0]['perm_maxsize_values'] # Return z-map image, mask image and dictionary of null distribution # for cluster sizes (s), max cluster size (smax) and max z-score (zmax) return zimg, maskimg, nulls def twosample_test(data_images, vardata_images, mask_images, labels, stat_id, permutations=0, cluster_forming_th=0.01): """ Helper function for permutation-based mass univariate twosample group analysis. Labels is a binary vector (1-2). Regions more active for group 1 than group 2 are inferred. """ # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images, mask_images) # Create two-sample permutation test instance if vardata_images is None: ptest = permutation_test_twosample( data[labels == 1], data[labels == 2], xyz, stat_id=stat_id) else: ptest = permutation_test_twosample( data[labels == 1], data[labels == 2], xyz, vardata1=vardata[labels == 1], vardata2=vardata[labels == 2], stat_id=stat_id) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[tuple(xyz)] = ptest.zscore() zimg = Image(zmap, get_affine(data_images[0])) # Compute mask image maskimg = Image(mask, get_affine(data_images[0])) # Multiple comparisons if permutations <= 0: return zimg, maskimg else: # Cluster definition: (threshold, diameter) cluster_def = (ptest.height_threshold(cluster_forming_th), None) # Calibration voxel_res, cluster_res, region_res = \ ptest.calibrate(nperms=permutations, clusters=[cluster_def]) nulls = {} nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values']) nulls['s'] = cluster_res[0]['perm_size_values'] nulls['smax'] = cluster_res[0]['perm_maxsize_values'] # Return z-map image, mask image and dictionary of null # distribution for cluster sizes (s), max cluster size (smax) # and max z-score (zmax) return zimg, maskimg, nulls ############################################################################### # Linear model ############################################################################### def linear_model_fit(data_images, mask_images, design_matrix, vector): """ Helper function for group data analysis using arbitrary design matrix """ # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, None, mask_images) # Create glm instance G = glm(data, design_matrix) # Compute requested contrast c = G.contrast(vector) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[tuple(xyz)] = c.zscore() zimg = Image(zmap, get_affine(data_images[0])) return zimg class LinearModel: def_model = 'spherical' def_niter = 2 def __init__(self, data, design_matrix, mask=None, formula=None, model=def_model, method=None, niter=def_niter): # 
Convert input data and design into sequences if not hasattr(data, '__iter__'): data = [data] if not hasattr(design_matrix, '__iter__'): design_matrix = [design_matrix] # configure spatial properties # the 'sampling' direction is assumed to be the last # TODO: check that all input images have the same shape and # that it's consistent with the mask nomask = mask is None if nomask: self.xyz = None self.axis = len(data[0].shape) - 1 else: self.xyz = np.where(mask.get_fdata() > 0) self.axis = 1 self.spatial_shape = data[0].shape[0: -1] self.affine = get_affine(data[0]) self.glm = [] for i in range(len(data)): if not isinstance(design_matrix[i], np.ndarray): raise ValueError('Invalid design matrix') if nomask: Y = data[i].get_fdata() else: Y = data[i].get_fdata()[self.xyz] X = design_matrix[i] self.glm.append(glm(Y, X, axis=self.axis, formula=formula, model=model, method=method, niter=niter)) def dump(self, filename): """Dump GLM fit as npz file. """ models = len(self.glm) if models == 1: self.glm[0].save(filename) else: for i in range(models): self.glm[i].save(filename + str(i)) def contrast(self, vector): """Compute images of contrast and contrast variance. """ # Compute the overall contrast across models c = self.glm[0].contrast(vector) for g in self.glm[1:]: c += g.contrast(vector) def affect_inmask(dest, src, xyz): if xyz is None: dest = src else: dest[xyz] = src return dest con = np.zeros(self.spatial_shape) con_img = Image(affect_inmask(con, c.effect, self.xyz), self.affine) vcon = np.zeros(self.spatial_shape) vcon_img = Image(affect_inmask(vcon, c.variance, self.xyz), self.affine) z = np.zeros(self.spatial_shape) z_img = Image(affect_inmask(z, c.zscore(), self.xyz), self.affine) dof = c.dof return con_img, vcon_img, z_img, dof ############################################################################### # Hack to have test framework skip onesample_test, which is not a unit test onesample_test.__test__ = False twosample_test.__test__ = False nipy-0.6.1/nipy/labs/tests/000077500000000000000000000000001470056100100155315ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/tests/__init__.py000066400000000000000000000000001470056100100176300ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/tests/test_mask.py000066400000000000000000000111051470056100100200730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the mask-extracting utilities. """ import nibabel as nib import numpy as np from nipy.testing import ( anatfile, assert_array_equal, ) from .. import mask as nnm from ..mask import largest_cc, series_from_mask, threshold_connect_components def test_largest_cc(): """ Check the extraction of the largest connected component. 
""" a = np.zeros((6, 6, 6)) a[1:3, 1:3, 1:3] = 1 assert_array_equal(a, largest_cc(a)) b = a.copy() b[5, 5, 5] = 1 assert_array_equal(a, largest_cc(b)) def test_threshold_connect_components(): a = np.zeros((10, 10)) a[0, 0] = 1 a[3, 4] = 1 a = threshold_connect_components(a, 2) assert np.all(a == 0) a[0, 0:3] = 1 b = threshold_connect_components(a, 2) assert np.all(a == b) def test_mask(): mean_image = np.ones((9, 9)) mean_image[3:-3, 3:-3] = 10 mean_image[5, 5] = 100 mask1 = nnm.compute_mask(mean_image) mask2 = nnm.compute_mask(mean_image, exclude_zeros=True) # With an array with no zeros, exclude_zeros should not make # any difference assert_array_equal(mask1, mask2) # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30)) mean_image2[:9, :9] = mean_image mask3 = nnm.compute_mask(mean_image2, exclude_zeros=True) assert_array_equal(mask1, mask3[:9, :9]) # However, without exclude_zeros, it does mask3 = nnm.compute_mask(mean_image2) assert not np.allclose(mask1, mask3[:9, :9]) # check that opening is 2 by default mask4 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=2) assert_array_equal(mask1, mask4) # check that opening has an effect mask5 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=0) assert mask5.sum() > mask4.sum() def test_mask_files(in_tmp_path): # Make a 4D file from the anatomical example img = nib.load(anatfile) arr = img.get_fdata() a2 = np.zeros(arr.shape + (2, )) a2[:, :, :, 0] = arr a2[:, :, :, 1] = arr img = nib.Nifti1Image(a2, np.eye(4)) a_fname = 'fourd_anat.nii' nib.save(img, a_fname) # check 4D mask msk1, mean1 = nnm.compute_mask_files(a_fname, return_mean=True) # and mask from identical list of 3D files msk2, mean2 = nnm.compute_mask_files([anatfile, anatfile], return_mean=True) assert_array_equal(msk1, msk2) assert_array_equal(mean1, mean2) def test_series_from_mask(in_tmp_path): """ Test the smoothing of the timeseries extraction """ # A delta in 3D data = np.zeros((40, 40, 40, 2)) data[20, 20, 20] = 1 mask = np.ones((40, 40, 40), dtype=np.bool_) for affine in (np.eye(4), np.diag((1, 1, -1, 1)), np.diag((.5, 1, .5, 1))): img = nib.Nifti1Image(data, affine) nib.save(img, 'testing.nii') series, header = series_from_mask('testing.nii', mask, smooth=9) series = np.reshape(series[:, 0], (40, 40, 40)) vmax = series.max() # We are expecting a full-width at half maximum of # 9mm/voxel_size: above_half_max = series > .5*vmax for axis in (0, 1, 2): proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis), axis=-1), axis=-1) assert proj.sum() == 9/np.abs(affine[axis, axis]) # Check that NaNs in the data do not propagate data[10, 10, 10] = np.nan img = nib.Nifti1Image(data, affine) nib.save(img, 'testing.nii') series, header = series_from_mask('testing.nii', mask, smooth=9) assert np.all(np.isfinite(series)) def test_compute_mask_sessions(in_tmp_path): """Test that the mask computes well on multiple sessions """ # Make a 4D file from the anatomical example img = nib.load(anatfile) arr = img.get_fdata() a2 = np.zeros(arr.shape + (2, )) a2[:, :, :, 0] = arr a2[:, :, :, 1] = arr img = nib.Nifti1Image(a2, np.eye(4)) a_fname = 'fourd_anat.nii' nib.save(img, a_fname) a3 = a2.copy() a3[:10, :10, :10] = 0 img2 = nib.Nifti1Image(a3, np.eye(4)) # check 4D mask msk1 = nnm.compute_mask_sessions([img2, img2]) msk2 = nnm.compute_mask_sessions([img2, a_fname]) assert_array_equal(msk1, msk2) msk3 = nnm.compute_mask_sessions([img2, a_fname], threshold=.9) msk4 = nnm.compute_mask_sessions([img2, a_fname], threshold=0) msk5 = 
nnm.compute_mask_sessions([a_fname, a_fname]) assert_array_equal(msk1, msk3) assert_array_equal(msk4, msk5) nipy-0.6.1/nipy/labs/tests/test_statistical_mapping.py000066400000000000000000000046501470056100100232060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nibabel import Nifti1Image from ..statistical_mapping import cluster_stats from ..utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset def make_surrogate_data(): """ Return a single deterministic 3D image """ shape = (40, 40) pos = np.array([[ 2, 10], [10, 4], [20, 30], [30, 20]]) ampli = np.array([5, 5, 5, 5]) data = surrogate_2d_dataset(n_subj=1, pos=pos, shape=shape, noise_level=0, ampli=ampli, spatial_jitter=0, signal_jitter=0).squeeze() data = np.reshape(data, (shape[0], shape[1], 1)) return Nifti1Image(data, np.eye(4)) def test1(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=0, nulls={}) assert len(clusters)==4 def test2(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=5, nulls={}) assert len(clusters)==4 def test3(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=10, nulls={}) assert len(clusters)==0 def test_4(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.001, height_control='fpr', cluster_th=0, nulls={}) assert len(clusters)==4 def test_5(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.05, height_control='bonferroni', cluster_th=0, nulls={}) assert len(clusters)==4 def test_6(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.05, height_control='fdr', cluster_th=0, nulls={}) print(len(clusters), sum(c['size'] for c in clusters)) assert len(clusters)==4 def test7(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=0, nulls={}) nstv = sum(c['size'] for c in clusters) assert nstv==36 def test_8(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.001, height_control='fpr', cluster_th=0, nulls={}) nstv = sum(c['size'] for c in clusters) assert nstv==36 nipy-0.6.1/nipy/labs/tests/test_viz.py000066400000000000000000000016471470056100100177620ustar00rootroot00000000000000""" Tests for visualization """ import numpy as np from nipy.labs.viz import coord_transform, mni_sform, plot_map def test_example(): # Example from tutorial. 
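# Smoke test only: this checks that plot_map runs end-to-end on synthetic
# data; the rendered figure itself is not inspected.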
# First, create a fake activation map: a 3D image in MNI space with # a large rectangle of activation around Broca's area mni_sform_inv = np.linalg.inv(mni_sform) # Color an asymmetric rectangle around Broca's area: x, y, z = -52, 10, 22 x_map, y_map, z_map = (int(coord) for coord in coord_transform(x, y, z, mni_sform_inv)) map = np.zeros((182, 218, 182)) map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 # We use a masked array to add transparency to the parts that we are # not interested in: thresholded_map = np.ma.masked_less(map, 0.5) # And now, visualize it: plot_map(thresholded_map, mni_sform, cut_coords=(x, y, z), vmin=0.5) nipy-0.6.1/nipy/labs/utils/000077500000000000000000000000001470056100100155275ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/utils/__init__.py000066400000000000000000000004241470056100100176400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .routines import ( combinations, gamln, mahalanobis, median, permutations, psi, quantile, svd, ) from .zscore import zscore nipy-0.6.1/nipy/labs/utils/mask.py000066400000000000000000000005111470056100100170310ustar00rootroot00000000000000""" Compatibility module """ import warnings warnings.warn(DeprecationWarning( "This module (nipy.labs.utils.mask) has been moved and " "is deprecated. Please update your code to import from " "'nipy.labs.mask'.")) # Absolute import, as 'import *' does not work with relative imports from nipy.labs.mask import * nipy-0.6.1/nipy/labs/utils/meson.build000066400000000000000000000011201470056100100176630ustar00rootroot00000000000000target_dir = 'nipy/labs/utils' py.extension_module('routines', cython_gen_cstat.process('routines.pyx'), dependencies: cstat_dep, c_args: cython_c_args, include_directories: [ incdir_numpy, '../../../lib/fff', '../../../lib/fff_python_wrapper' ], install: true, subdir: target_dir ) python_sources = [ '__init__.py', 'mask.py', 'reproducibility_measures.py', 'simul_multisubject_fmri_dataset.py', 'zscore.py' ] py.install_sources( python_sources, pure: false, subdir: target_dir ) install_subdir('tests', install_dir: install_root / target_dir) nipy-0.6.1/nipy/labs/utils/reproducibility_measures.py000066400000000000000000000605141470056100100232240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Functions for computing reproducibility measures. The general procedure is: - the dataset is subject to jackknife subsampling ('splitting'), - each subsample is analysed independently, - a reproducibility measure is then derived. It is used to produce the work described in Analysis of a large fMRI cohort: Statistical and methodological issues for group analyses. Thirion B, Pinel P, Meriaux S, Roche A, Dehaene S, Poline JB. Neuroimage. 2007 Mar;35(1):105-20. Bertrand Thirion, 2009-2010 """ import numpy as np from nipy.io.nibcompat import get_affine from nipy.labs.spatial_models.discrete_domain import grid_domain_from_binary_array # --------------------------------------------------------- # ----- cluster handling functions ------------------------ # --------------------------------------------------------- def histo_repro(h): """ Given the histogram h, compute a standardized reproducibility measure Parameters ---------- h: array of shape (xmax + 1), the histogram values Returns ------- hr, float: the measure """ k = np.size(h) - 1 if k == 1: return 0.
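# A short reading of the computation below (added commentary): h[i] counts
# the voxels declared active in exactly i of the k subgroups; nf is the
# average number of active voxels per subgroup, and res is the expected
# number of voxels jointly active in two distinct subgroups. Roughly
# speaking, hr = res / nf is the rate at which activation found in one
# subgroup is replicated in another.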
nf = np.dot(h, np.arange(k + 1)) / k if nf == 0: return 0. n1k = np.arange(1, k + 1) res = 1.0 * np.dot(h[1:], n1k * (n1k - 1)) / (k * (k - 1)) return res / nf def cluster_threshold(stat_map, domain, th, csize): """Perform a thresholding of a map at the cluster-level Parameters ---------- stat_map: array of shape(nbvox) the input data domain: Nifti1Image instance, referential- and domain-defining image th (float): cluster-forming threshold csize (int>0): cluster size threshold Returns ------- binary array of shape (nvox): the binarized thresholded map Notes ----- Should be replaced by a more standard function in the future """ if stat_map.shape[0] != domain.size: raise ValueError('incompatible dimensions') # first build a domain of supra_threshold regions thresholded_domain = domain.mask(stat_map > th) # get the connected components label = thresholded_domain.connected_components() binary = - np.ones(domain.size) binary[stat_map > th] = label nbcc = len(np.unique(label)) for i in range(nbcc): if np.sum(label == i) < csize: binary[binary == i] = - 1 binary = (binary > -1) return binary def get_cluster_position_from_thresholded_map(stat_map, domain, thr=3.0, csize=10): """ the clusters above thr of size greater than csize in 18-connectivity are computed Parameters ---------- stat_map : array of shape (nbvox), map to threshold domain: Nifti1Image instance, referential- and domain-defining image thr: float, optional, cluster-forming threshold csize: int, optional, cluster size threshold Returns ------- positions array of shape(k,anat_dim): the cluster positions in physical coordinates where k= number of clusters if no such cluster exists, None is returned """ # if no supra-threshold voxel, return if (stat_map <= thr).all(): return None # first build a domain of supra_threshold regions thresholded_domain = domain.mask(stat_map > thr) # get the connected components label = thresholded_domain.connected_components() # get the coordinates coord = thresholded_domain.get_coord() # get the barycenters baryc = [ np.mean(coord[label == i], 0) for i in range(label.max() + 1) if np.sum(label == i) >= csize ] if len(baryc) == 0: return None baryc = np.vstack(baryc) return baryc def get_peak_position_from_thresholded_map(stat_map, domain, threshold): """The peaks above threshold in 18-connectivity are computed Parameters ---------- stat_map: array of shape (nbvox): map to threshold domain: referential- and domain-defining image threshold, float: cluster-forming threshold Returns ------- positions array of shape(k,anat_dim): the peak positions in physical coordinates where k= number of peaks if no such peak exists, None is returned """ from ..statistical_mapping import get_3d_peaks # create an image to represent stat_map simage = domain.to_image(data=stat_map) # extract the peaks peaks = get_3d_peaks(simage, threshold=threshold, order_th=2) if peaks is None: return None pos = np.array([p['pos'] for p in peaks]) return pos # --------------------------------------------------------- # ----- data splitting functions ------------------------ # --------------------------------------------------------- def bootstrap_group(nsubj, ngroups): """Split the proposed group into redundant subgroups by bootstrap Parameters ---------- nsubj (int) the number of subjects in the population ngroups(int) Number of subgroups to be drawn Returns ------- samples: a list of ngroups arrays containing the indexes of the subjects in each subgroup """ groupsize = nsubj samples = [(groupsize * np.random.rand(groupsize)).astype(np.int_) for i in
range(ngroups)] return samples def split_group(nsubj, ngroups): """Split the proposed group into random disjoint subgroups Parameters ---------- nsubj (int) the number of subjects to be split ngroups(int) Number of subgroups to be drawn Returns ------- samples: a list of ngroups arrays containing the indexes of the subjects in each subgroup """ groupsize = int(np.floor(nsubj / ngroups)) rperm = np.argsort(np.random.rand(nsubj)) samples = [rperm[i * groupsize: (i + 1) * groupsize] for i in range(ngroups)] return samples # --------------------------------------------------------- # ----- statistic computation ----------------------------- # --------------------------------------------------------- def conjunction(x, vx, k): """Returns a conjunction statistic as the sum of the k lowest t-values Parameters ---------- x: array of shape(nrows, ncols), effect matrix vx: array of shape(nrows, ncols), variance matrix k: int, number of subjects in the conjunction Returns ------- t array of shape(nrows): conjunction statistic """ t = np.sort(x / np.sqrt(np.maximum(vx, 1.e-15))) cjt = np.sum(t[:, :k], 1) return cjt def ttest(x): """Returns the one-sample t-test for each row of the data x """ from ..group.onesample import stat t = stat(x.T, id='student', axis=0) return np.squeeze(t) def fttest(x, vx): """Assuming that x and vx represent effect and variance estimates, returns a cumulated ('fixed effects') t-test of the data over each row Parameters ---------- x: array of shape(nrows, ncols): effect matrix vx: array of shape(nrows, ncols): variance matrix Returns ------- t array of shape(nrows): fixed effect statistics array """ if np.shape(x) != np.shape(vx): raise ValueError("incompatible dimensions for x and vx") n = x.shape[1] t = x / np.sqrt(np.maximum(vx, 1.e-15)) t = t.mean(1) * np.sqrt(n) return t def mfx_ttest(x, vx): """Same as fttest, but returns a mixed-effects statistic Parameters ---------- x: array of shape(nrows, ncols): effect matrix vx: array of shape(nrows, ncols): variance matrix Returns ------- t array of shape(nrows): mixed effect statistics array """ from ..group.onesample import stat_mfx t = stat_mfx(x.T, vx.T, id='student_mfx', axis=0) return np.squeeze(t) def voxel_thresholded_ttest(x, threshold): """Returns a binary map of the ttest>threshold """ t = ttest(x) return t > threshold def statistics_from_position(target, data, sigma=1.0): """ Return a number characterizing how close data is to target using a kernel-based statistic Parameters ---------- target: array of shape(nt,anat_dim) or None the target positions data: array of shape(nd,anat_dim) or None the data positions sigma=1.0 (float), kernel parameter, i.e. the distance scale at which a data point counts as close to a target Returns ------- sensitivity (float): how well the targets are fitted by the data, in the [0, 1] interval (1 is good, 0 is bad) """ from ...algorithms.utils.fast_distance import euclidean_distance as ed if data is None: if target is None: return 0. # could be 1.0 ? else: return 0. if target is None: return 0.
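# For each target position, take the distance to the closest data point,
# pass it through a Gaussian kernel of width sigma, and average over
# targets: targets with a nearby data point contribute ~1, isolated
# targets contribute ~0.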
dmatrix = ed(data, target) / sigma sensitivity = dmatrix.min(0) sensitivity = np.exp( - 0.5 * sensitivity ** 2) sensitivity = np.mean(sensitivity) return sensitivity # ------------------------------------------------------- # ---------- The main functions ----------------------------- # ------------------------------------------------------- def voxel_reproducibility(data, vardata, domain, ngroups, method='crfx', swap=False, verbose=0, **kwargs): """ Return a measure of voxel-level reproducibility of activation patterns Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the corresponding variance information domain: referential- and domain-defining image ngroups: int, number of subgroups to be used in the resampling procedure method: string, to be chosen among 'crfx', 'cmfx', 'cffx' inference method under study verbose: bool, verbosity mode Returns ------- kappa (float): the desired reproducibility index """ rmap = map_reproducibility(data, vardata, domain, ngroups, method, swap, verbose, **kwargs) h = np.array([np.sum(rmap == i) for i in range(ngroups + 1)]) hr = histo_repro(h) return hr def draw_samples(nsubj, ngroups, split_method='default'): """ Draw randomly ngroups sets of samples from [0..nsubj-1] Parameters ---------- nsubj, int, the total number of items ngroups, int, the number of desired groups split_method: string, optional, to be chosen among 'default', 'bootstrap', 'jacknife' if 'bootstrap', then each group will consist of nsubj subjects drawn with replacement among nsubj if 'jacknife' the population is divided into ngroups disjoint equally-sized subgroups if 'default', 'bootstrap' is used when nsubj < 10 * ngroups, otherwise 'jacknife' is used Returns ------- samples, a list of ngroups arrays that represent the subsets. FIXME: this should allow variable bootstrap, i.e.
draw ngroups of groupsize among nsubj """ if split_method == 'default': if nsubj > 10 * ngroups: samples = split_group(nsubj, ngroups) else: samples = bootstrap_group(nsubj, ngroups) elif split_method == 'bootstrap': samples = bootstrap_group(nsubj, ngroups) elif split_method == 'jacknife': samples = split_group(nsubj, ngroups) else: raise ValueError('unknown splitting method') return samples def map_reproducibility(data, vardata, domain, ngroups, method='crfx', swap=False, verbose=0, **kwargs): """ Return a reproducibility map for the given method Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of the same size the corresponding variance information domain: referential- and domain-defining image ngroups (int): the number of subgroups to be drawn threshold (float): binarization threshold (makes sense only if method==rfx) method='crfx', string to be chosen among 'crfx', 'cmfx', 'cffx' inference method under study verbose=0 : verbosity mode Returns ------- rmap: array of shape(nvox) the reproducibility map """ nsubj = data.shape[1] nvox = data.shape[0] samples = draw_samples(nsubj, ngroups) rmap = np.zeros(nvox) for i in range(ngroups): x = data[:, samples[i]] if swap: # randomly swap the sign of x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method != 'crfx': vx = vardata[:, samples[i]] csize = kwargs['csize'] threshold = kwargs['threshold'] # compute the statistical map with the requested method if method == 'crfx': stat_map = ttest(x) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cjt': if 'k' in kwargs: k = kwargs['k'] else: k = nsubj // 2 stat_map = conjunction(x, vx, k) else: raise ValueError('unknown method') # add the binarized map to a reproducibility map rmap += cluster_threshold(stat_map, domain, threshold, csize) > 0 return rmap def peak_reproducibility(data, vardata, domain, ngroups, sigma, method='crfx', swap=False, verbose=0, **kwargs): """ Return a measure of peak-level reproducibility of activation patterns (i.e. how far the activation peaks are from each other across subgroups) Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the corresponding variance information domain: referential- and domain-defining image ngroups (int), Number of subgroups to be drawn sigma: float, distance scale of the kernel used to compare peak positions (i.e. what counts as 'far') threshold: float, binarization threshold method: string to be chosen among 'crfx', 'cmfx' or 'cffx', inference method under study swap = False: if True, a random sign swap of the data is performed This is used to simulate a null hypothesis on the data.
verbose=0 : verbosity mode Returns ------- score (float): the desired peak-level reproducibility index """ tiny = 1.e-15 nsubj = data.shape[1] samples = draw_samples(nsubj, ngroups) all_pos = [] # compute the positions in the different subgroups for i in range(ngroups): x = data[:, samples[i]] if swap: # apply a random sign swap to x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method != 'crfx': vx = vardata[:, samples[i]] if method != 'bsa': threshold = kwargs['threshold'] if method == 'crfx': stat_map = ttest(x) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cjt': if 'k' in kwargs: k = kwargs['k'] else: k = nsubj // 2 stat_map = conjunction(x, vx, k) pos = get_peak_position_from_thresholded_map( stat_map, domain, threshold) all_pos.append(pos) else: # method='bsa' is a special case tx = x / (tiny + np.sqrt(vx)) afname = kwargs['afname'] theta = kwargs['theta'] dmax = kwargs['dmax'] ths = kwargs['ths'] thq = kwargs['thq'] smin = kwargs['smin'] niter = kwargs['niter'] afname = afname + '_%02d_%04d.pic' % (niter, i) pos = coord_bsa(domain, tx, theta, dmax, ths, thq, smin, afname) all_pos.append(pos) # derive a kernel-based goodness measure from the pairwise comparison # of sets of positions score = 0 for i in range(ngroups): for j in range(i): score += statistics_from_position(all_pos[i], all_pos[j], sigma) score += statistics_from_position(all_pos[j], all_pos[i], sigma) score /= (ngroups * (ngroups - 1)) return score def cluster_reproducibility(data, vardata, domain, ngroups, sigma, method='crfx', swap=False, verbose=0, **kwargs): """Returns a measure of cluster-level reproducibility of activation patterns (i.e. how far the cluster positions are from each other across subgroups) Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the corresponding variance information domain: referential- and domain-defining image instance ngroups (int), Number of subgroups to be drawn sigma (float): distance scale of the kernel used to compare cluster positions (i.e. what counts as 'far') threshold (float): binarization threshold method='crfx', string to be chosen among 'crfx', 'cmfx' or 'cffx' inference method under study swap = False: if True, a random sign swap of the data is performed This is used to simulate a null hypothesis on the data.
verbose=0 : verbosity mode Returns ------- score (float): the desired cluster-level reproducibility index """ tiny = 1.e-15 nsubj = data.shape[1] samples = draw_samples(nsubj, ngroups) all_pos = [] # compute the positions in the different subgroups for i in range(ngroups): x = data[:, samples[i]] if swap: # apply a random sign swap to x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method != 'crfx': vx = vardata[:, samples[i]] if method != 'bsa': csize = kwargs['csize'] threshold = kwargs['threshold'] if method == 'crfx': stat_map = ttest(x) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cjt': if 'k' in kwargs: k = kwargs['k'] else: k = nsubj // 2 stat_map = conjunction(x, vx, k) pos = get_cluster_position_from_thresholded_map(stat_map, domain, threshold, csize) all_pos.append(pos) else: # method='bsa' is a special case tx = x / (tiny + np.sqrt(vx)) afname = kwargs['afname'] theta = kwargs['theta'] dmax = kwargs['dmax'] ths = kwargs['ths'] thq = kwargs['thq'] smin = kwargs['smin'] niter = kwargs['niter'] afname = afname + '_%02d_%04d.pic' % (niter, i) pos = coord_bsa(domain, tx, theta, dmax, ths, thq, smin, afname) all_pos.append(pos) # derive a kernel-based goodness measure from the pairwise comparison # of sets of positions score = 0 for i in range(ngroups): for j in range(i): score += statistics_from_position(all_pos[i], all_pos[j], sigma) score += statistics_from_position(all_pos[j], all_pos[i], sigma) score /= (ngroups * (ngroups - 1)) return score def group_reproducibility_metrics( mask_images, contrast_images, variance_images, thresholds, ngroups, method, cluster_threshold=10, number_of_samples=10, sigma=6., do_clusters=True, do_voxels=True, do_peaks=True, swap=False): """ Main function to perform reproducibility analysis, including Nifti1 I/O Parameters ---------- thresholds: list or 1-d array, the thresholds to be tested Returns ------- cluster_rep_results: dictionary, results of cluster-level reproducibility analysis voxel_rep_results: dictionary, results of voxel-level reproducibility analysis peak_rep_results: dictionary, results of peak-level reproducibility analysis """ from nibabel import load from ..mask import intersect_masks if len(variance_images) == 0 and method != 'crfx': raise ValueError('Variance images are necessary') nsubj = len(contrast_images) # compute the group mask affine = get_affine(load(mask_images[0])) mask = intersect_masks(mask_images, threshold=0) > 0 domain = grid_domain_from_binary_array(mask, affine) # read the data group_con = [] group_var = [] for s in range(nsubj): group_con.append(load(contrast_images[s]).get_fdata()[mask]) if len(variance_images) > 0: group_var.append(load(variance_images[s]).get_fdata()[mask]) group_con = np.squeeze(np.array(group_con)).T group_con[np.isnan(group_con)] = 0 if len(variance_images) > 0: group_var = np.squeeze(np.array(group_var)).T group_var[np.isnan(group_var)] = 0 group_var = np.maximum(group_var, 1.e-15) # perform the analysis voxel_rep_results = {} cluster_rep_results = {} peak_rep_results = {} for ng in ngroups: if do_voxels: voxel_rep_results.update({ng: {}}) if do_clusters: cluster_rep_results.update({ng: {}}) if do_peaks: peak_rep_results.update({ng: {}}) for th in thresholds: kappa = [] cls = [] pk = [] kwargs = {'threshold': th, 'csize': cluster_threshold} for i in range(number_of_samples): if do_voxels: kappa.append(voxel_reproducibility( group_con, group_var, domain, ng, method, swap, **kwargs)) if do_clusters:
cls.append(cluster_reproducibility( group_con, group_var, domain, ng, sigma, method, swap, **kwargs)) if do_peaks: pk.append(peak_reproducibility( group_con, group_var, domain, ng, sigma, method, swap, **kwargs)) if do_voxels: voxel_rep_results[ng].update({th: np.array(kappa)}) if do_clusters: cluster_rep_results[ng].update({th: np.array(cls)}) if do_peaks: peak_rep_results[ng].update({th: np.array(pk)}) return voxel_rep_results, cluster_rep_results, peak_rep_results # ------------------------------------------------------- # ---------- BSA stuff ---------------------------------- # ------------------------------------------------------- def coord_bsa(domain, betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0, afname=None): """ Main function for performing bsa on a dataset where bsa = nipy.labs.spatial_models.bayesian_structural_analysis Parameters ---------- domain: image instance, referential- and domain-defining image betas: array of shape (nbnodes, subjects), the multi-subject statistical maps theta: float, optional first level threshold dmax: float>0, optional expected cluster std in the common space in units of coord ths: int (>=0), optional representativity threshold thq: float, optional, posterior significance threshold should be in [0,1] smin: int, optional, minimal size of the regions to validate them afname: string, optional path where intermediate results can be pickled Returns ------- afcoord array of shape(number_of_regions,3): coordinate of the found landmark regions """ from ..spatial_models.bayesian_structural_analysis import compute_BSA_quick crmap, AF, BF, p = compute_BSA_quick( domain, betas, dmax, thq, smin, ths, theta, verbose=0) if AF is None: return None if afname is not None: import pickle with open(afname, 'wb') as f: pickle.dump(AF, f) afcoord = AF.discrete_to_roi_features('position') return afcoord nipy-0.6.1/nipy/labs/utils/routines.pyx000066400000000000000000000172341470056100100201470ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Miscellaneous fff routines. Author: Alexis Roche, 2008. """ __version__ = '0.1' # Includes from fff cimport * cimport numpy as cnp from warnings import warn warn('Module nipy.labs.utils.routines deprecated, will be removed', FutureWarning, stacklevel=2) # Exports from fff_gen_stats.h cdef extern from "fff_gen_stats.h": double fff_mahalanobis(fff_vector* x, fff_matrix* S, fff_matrix* Saux) void fff_permutation(unsigned int* x, unsigned int n, unsigned long int magic) void fff_combination(unsigned int* x, unsigned int k, unsigned int n, unsigned long magic) # Exports from fff_specfun.h cdef extern from "fff_specfun.h": extern double fff_gamln(double x) extern double fff_psi(double x) # Exports from fff_lapack.h cdef extern from "fff_lapack.h": extern int fff_lapack_dgesdd(fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux) # Initialize numpy fffpy_import_array() cnp.import_array() import numpy as np # This is faster than scipy.stats.scoreatpercentile due to partial # sorting def quantile(X, double ratio, int interp=False, int axis=0): """ q = quantile(data, ratio, interp=False, axis=0). Partial sorting algorithm, very fast!!!
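Parameters
----------
X : ndarray
    input data
ratio : float
    quantile position, in [0, 1] (e.g. 0.5 for the median)
interp : bool, optional
    if True, interpolate between the two data values bracketing the
    requested quantile
axis : int, optional
    axis along which the quantiles are computed

Returns
-------
Y : ndarray
    same shape as X, with the chosen axis reduced to length 1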
""" cdef fff_vector *x cdef fff_vector *y cdef fffpy_multi_iterator* multi # Allocate output array Y dims = list(X.shape) dims[axis] = 1 Y = np.zeros(dims) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, X, Y) # Create vector views on both X and Y x = multi.vector[0] y = multi.vector[1] # Loop while(multi.index < multi.size): y.data[0] = fff_vector_quantile(x, ratio, interp) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) return Y # This is faster than numpy.stats # due to the underlying algorithm that relies on # partial sorting as opposed to full sorting. def median(x, axis=0): """ median(x, axis=0). Equivalent to: quantile(x, ratio=0.5, interp=True, axis=axis). """ return quantile(x, axis=axis, ratio=0.5, interp=True) def mahalanobis(X, VX): """ d2 = mahalanobis(X, VX). ufunc-like function to compute Mahalanobis squared distances x'*inv(Vx)*x. axis == 0 assumed. If X is shaped (d,K), VX must be shaped (d,d,K). """ cdef fff_vector *x cdef fff_vector *vx cdef fff_vector *x_tmp cdef fff_vector *vx_tmp cdef fff_vector *d2 cdef fff_matrix Sx cdef fff_matrix *Sx_tmp cdef fffpy_multi_iterator* multi cdef int axis=0, n # Allocate output array dims = list(X.shape) dim = dims[0] dims[0] = 1 D2 = np.zeros(dims) # Flatten input variance array VX_flat = VX.reshape( [dim*dim]+list(VX.shape[2:]) ) # Create a new array iterator multi = fffpy_multi_iterator_new(3, axis, X, VX_flat, D2) # Allocate local structures n = X.shape[axis] x_tmp = fff_vector_new(n) vx_tmp = fff_vector_new(n*n) Sx_tmp = fff_matrix_new(n, n) # Create vector views on X, VX_flat and D2 x = multi.vector[0] vx = multi.vector[1] d2 = multi.vector[2] # Loop while(multi.index < multi.size): fff_vector_memcpy(x_tmp, x) fff_vector_memcpy(vx_tmp, vx) Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) fffpy_multi_iterator_update(multi) # Delete local structs and views fff_vector_delete(x_tmp) fff_vector_delete(vx_tmp) fff_matrix_delete(Sx_tmp) fffpy_multi_iterator_delete(multi) # Return D2 = D2.reshape(VX.shape[2:]) return D2 def svd(X): """ Singular value decomposition of array `X` Y = svd(X) ufunc-like svd. Given an array X (m, n, K), perform an SV decomposition. 
Parameters ---------- X : 2D array Returns ------- S : (min(m,n), K) """ cdef int axis=0 cdef int m, n, dmin, dmax, lwork, liwork, info cdef fff_vector *work cdef fff_vector *x_flat cdef fff_vector *x_flat_tmp cdef fff_vector *s cdef fff_vector *s_tmp cdef fff_matrix x cdef fff_array *iwork cdef fff_matrix *Aux cdef fff_matrix *U cdef fff_matrix *Vt cdef fffpy_multi_iterator* multi # Shape of matrices m = X.shape[0] n = X.shape[1] if m > n: dmin = n dmax = m else: dmin = m dmax = n # Create auxiliary arrays lwork = 4*dmin*(dmin+1) if dmax > lwork: lwork = dmax lwork = 2*(3*dmin*dmin + lwork) liwork = 8*dmin work = fff_vector_new(lwork) iwork = fff_array_new1d(FFF_INT, liwork) Aux = fff_matrix_new(dmax, dmax) U = fff_matrix_new(m, m) Vt = fff_matrix_new(n, n) x_flat_tmp = fff_vector_new(m*n) s_tmp = fff_vector_new(dmin) # Allocate output array endims = list(X.shape[2:]) S = np.zeros([dmin]+endims) # Flatten input array X_flat = X.reshape([m*n]+endims) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, X_flat, S) # Create vector views x_flat = multi.vector[0] s = multi.vector[1] # Loop while(multi.index < multi.size): fff_vector_memcpy(x_flat_tmp, x_flat) fff_vector_memcpy(s_tmp, s) x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) fff_vector_memcpy(s, s_tmp) fffpy_multi_iterator_update(multi) # Delete local structures fff_vector_delete(work) fff_vector_delete(x_flat_tmp) fff_vector_delete(s_tmp) fff_array_delete(iwork) fff_matrix_delete(Aux) fff_matrix_delete(U) fff_matrix_delete(Vt) fffpy_multi_iterator_delete(multi) # Return return S def permutations(unsigned int n, unsigned int m=1, unsigned long magic=0): """ P = permutations(n, m=1, magic=0). Generate m permutations from [0..n[. """ cdef fff_array *p cdef fff_array *pi cdef fff_array pi_view cdef unsigned int i p = fff_array_new2d(FFF_UINT, n, m) pi = fff_array_new1d(FFF_UINT, n) ## contiguous, dims=(n,1,1,1) for i from 0 <= i < m: fff_permutation(pi.data, n, magic+i) pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) fff_array_copy(&pi_view, pi) P = fff_array_toPyArray(p) return P def combinations(unsigned int k, unsigned int n, unsigned int m=1, unsigned long magic=0): """ P = combinations(k, n, m=1, magic=0). Generate m combinations of k elements from [0..n[. """ cdef fff_array *p, *pi cdef fff_array pi_view cdef unsigned int i p = fff_array_new2d(FFF_UINT, k, m) pi = fff_array_new1d(FFF_UINT, k) ## contiguous, dims=(n,1,1,1) for i from 0 <= i < m: fff_combination(pi.data, k, n, magic+i) pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) fff_array_copy(&pi_view, pi) C = fff_array_toPyArray(p) return C def gamln(double x): """ Python bindings to log gamma. Do not use, this is there only for testing. Use scipy.special.gammaln. """ cdef double y y = fff_gamln(x) return y def psi(double x): """ Python bindings to psi (d gamln(x)/dx. Do not use, this is there only for testing. Use scipy.special.psi. 
""" cdef double y y = fff_psi(x) return y nipy-0.6.1/nipy/labs/utils/simul_multisubject_fmri_dataset.py000066400000000000000000000251221470056100100245500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module contains a function to produce a dataset which simulates a collection of 2D images This dataset is saved as a 3D image (each slice being a subject) and a 3D array Author : Bertrand Thirion, 2008-2010 """ import numpy as np import scipy.ndimage as nd from nibabel import Nifti1Image, save from nipy.io.nibcompat import get_affine # definition of the maxima at the group level pos = np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([3, 4, 4]) def _cone2d(shape, ij, pos, ampli, width): """Define a cone of the proposed grid """ temp = np.zeros(shape) pos = np.reshape(pos, (1, 2)) dist = np.sqrt(np.sum((ij - pos) ** 2, axis=1)) codi = (width - dist) * (dist < width) / width temp[ij[:, 0], ij[:, 1]] = codi * ampli return temp def _cone3d(shape, ij, pos, ampli, width): """Define a cone of the proposed grid """ temp = np.zeros(shape) pos = np.reshape(pos, (1, 3)) dist = np.sqrt(np.sum((ij - pos) ** 2, axis=1)) codi = (width - dist) * (dist < width) / width temp[ij[:, 0], ij[:, 1], ij[:, 2]] = codi * ampli return temp def surrogate_2d_dataset(n_subj=10, shape=(30, 30), sk=1.0, noise_level=1.0, pos=pos, ampli=ampli, spatial_jitter=1.0, signal_jitter=1.0, width=5.0, width_jitter=0, out_text_file=None, out_image_file=None, seed=False): """ Create surrogate (simulated) 2D activation data with spatial noise Parameters ----------- n_subj: integer, optional The number of subjects, ie the number of different maps generated. shape=(30,30): tuple of integers, the shape of each image sk: float, optional Amount of spatial noise smoothness. noise_level: float, optional Amplitude of the spatial noise. amplitude=noise_level) pos: 2D ndarray of integers, optional x, y positions of the various simulated activations. ampli: 1D ndarray of floats, optional Respective amplitude of each activation spatial_jitter: float, optional Random spatial jitter added to the position of each activation, in pixel. signal_jitter: float, optional Random amplitude fluctuation for each activation, added to the amplitude specified by `ampli` width: float or ndarray, optional Width of the activations width_jitter: float Relative width jitter of the blobs out_text_file: string or None, optional If not None, the resulting array is saved as a text file with the given file name out_image_file: string or None, optional If not None, the resulting is saved as a nifti file with the given file name. 
seed=False: int, optional If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: 3D ndarray The surrogate activation map, with dimensions ``(n_subj,) + shape`` """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr ij = np.array(np.where(np.ones(shape))).T dataset = [] for s in range(n_subj): # make the signal data = np.zeros(shape) lpos = pos + spatial_jitter * nr.randn(1, 2) lampli = ampli + signal_jitter * nr.randn(np.size(ampli)) this_width = width * (1 - width_jitter * nr.randn(np.size(ampli))) for k in range(np.size(lampli)): data = np.maximum(data, _cone2d(shape, ij, lpos[k], lampli[k], this_width[k])) # make some noise noise = nr.randn(*shape) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise = np.reshape(noise, ( - 1, 1)) noise *= noise_level / np.std(noise) #make the mixture data += np.reshape(noise, shape) dataset.append(data) dataset = np.array(dataset) if out_text_file is not None: dataset.tofile(out_text_file) if out_image_file is not None: save(Nifti1Image(dataset, np.eye(4)), out_image_file) return dataset def surrogate_3d_dataset(n_subj=1, shape=(20, 20, 20), mask=None, sk=1.0, noise_level=1.0, pos=None, ampli=None, spatial_jitter=1.0, signal_jitter=1.0, width=5.0, out_text_file=None, out_image_file=None, seed=False): """Create surrogate (simulated) 3D activation data with spatial noise. Parameters ----------- n_subj: integer, optional The number of subjects, ie the number of different maps generated. shape=(20,20,20): tuple of 3 integers, the shape of each image mask=None: Nifti1Image instance, referential- and mask- defining image (overrides shape) sk: float, optional Amount of spatial noise smoothness. noise_level: float, optional Amplitude of the spatial noise. amplitude=noise_level) pos: 2D ndarray of integers, optional x, y positions of the various simulated activations. ampli: 1D ndarray of floats, optional Respective amplitude of each activation spatial_jitter: float, optional Random spatial jitter added to the position of each activation, in pixel. signal_jitter: float, optional Random amplitude fluctuation for each activation, added to the amplitude specified by ampli width: float or ndarray, optional Width of the activations out_text_file: string or None, optional If not None, the resulting array is saved as a text file with the given file name out_image_file: string or None, optional If not None, the resulting is saved as a nifti file with the given file name. 
seed=False: int, optional If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: 3D ndarray The surrogate activation map, with dimensions ``(n_subj,) + shape`` """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr if mask is not None: shape = mask.shape mask_data = mask.get_fdata() else: mask_data = np.ones(shape) ijk = np.array(np.where(mask_data)).T dataset = [] # make the signal for s in range(n_subj): data = np.zeros(shape) lampli = [] if pos is not None: if len(pos) != len(ampli): raise ValueError('ampli and pos do not have the same len') lpos = pos + spatial_jitter * nr.randn(1, 3) lampli = ampli + signal_jitter * nr.randn(np.size(ampli)) for k in range(np.size(lampli)): data = np.maximum(data, _cone3d(shape, ijk, lpos[k], lampli[k], width)) # make some noise noise = nr.randn(shape[0], shape[1], shape[2]) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise *= noise_level / np.std(noise) # make the mixture data += noise data[mask_data == 0] = 0 dataset.append(data) dataset = np.array(dataset) if n_subj == 1: dataset = dataset[0] if out_text_file is not None: dataset.tofile(out_text_file) if out_image_file is not None: save(Nifti1Image(dataset, np.eye(4)), out_image_file) return dataset def surrogate_4d_dataset(shape=(20, 20, 20), mask=None, n_scans=1, n_sess=1, dmtx=None, sk=1.0, noise_level=1.0, signal_level=1.0, out_image_file=None, seed=False): """ Create surrogate (simulated) 3D activation data with spatial noise. Parameters ----------- shape = (20, 20, 20): tuple of integers, the shape of each image mask=None: brifti image instance, referential- and mask- defining image (overrides shape) n_scans: int, optional, number of scans to be simlulated overridden by the design matrix n_sess: int, optional, the number of simulated sessions dmtx: array of shape(n_scans, n_rows), the design matrix sk: float, optional Amount of spatial noise smoothness. noise_level: float, optional Amplitude of the spatial noise. 
amplitude=noise_level) signal_level: float, optional, Amplitude of the signal out_image_file: string or list of strings or None, optional If not None, the resulting is saved as (set of) nifti file(s) with the given file path(s) seed=False: int, optional If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: a list of n_sess ndarray of shape (shape[0], shape[1], shape[2], n_scans) The surrogate activation map """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr if mask is not None: shape = mask.shape affine = get_affine(mask) mask_data = mask.get_fdata().astype('bool') else: affine = np.eye(4) mask_data = np.ones(shape).astype('bool') if dmtx is not None: n_scans = dmtx.shape[0] if (out_image_file is not None) and isinstance(out_image_file, str): out_image_file = [out_image_file] shape_4d = shape + (n_scans,) output_images = [] if dmtx is not None: beta = [] for r in range(dmtx.shape[1]): betar = nd.gaussian_filter(nr.randn(*shape), sk) betar /= np.std(betar) beta.append(signal_level * betar) beta = np.rollaxis(np.array(beta), 0, 4) for ns in range(n_sess): data = np.zeros(shape_4d) # make the signal if dmtx is not None: data[mask_data] += np.dot(beta[mask_data], dmtx.T) for s in range(n_scans): # make some noise noise = nr.randn(*shape) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise *= noise_level / np.std(noise) # make the mixture data[:, :, :, s] += noise data[:, :, :, s] += 100 * mask_data wim = Nifti1Image(data, affine) output_images.append(wim) if out_image_file is not None: save(wim, out_image_file[ns]) return output_images nipy-0.6.1/nipy/labs/utils/tests/000077500000000000000000000000001470056100100166715ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/utils/tests/__init__.py000066400000000000000000000000001470056100100207700ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/utils/tests/test_misc.py000066400000000000000000000033021470056100100212330ustar00rootroot00000000000000 import numpy as np from numpy.testing import TestCase, assert_almost_equal from scipy import special from ..routines import gamln, mahalanobis, median, psi class TestAll(TestCase): def test_median(self): x = np.random.rand(100) assert_almost_equal(median(x), np.median(x)) def test_median2(self): x = np.random.rand(101) assert median(x) == np.median(x) def test_median3(self): x = np.random.rand(10, 30, 11) assert_almost_equal(np.squeeze(median(x,axis=1)), np.median(x,axis=1)) def test_mahalanobis(self): x = np.random.rand(100) / 100 A = np.random.rand(100, 100) / 100 A = np.dot(A.transpose(), A) + np.eye(100) mah = np.dot(x, np.dot(np.linalg.inv(A), x)) assert_almost_equal(mah, mahalanobis(x, A), decimal=1) def test_mahalanobis2(self): x = np.random.rand(100,3,4) Aa = np.zeros([100,100,3,4]) for i in range(3): for j in range(4): A = np.random.rand(100,100) A = np.dot(A.T, A) Aa[:,:,i,j] = A i = np.random.randint(3) j = np.random.randint(4) mah = np.dot(x[:,i,j], np.dot(np.linalg.inv(Aa[:,:,i,j]), x[:,i,j])) f_mah = (mahalanobis(x, Aa))[i,j] assert np.allclose(mah, f_mah) def test_gamln(self): for x in (0.01+100*np.random.random(50)): scipy_gamln = special.gammaln(x) my_gamln = gamln(x) assert_almost_equal(scipy_gamln, my_gamln) def test_psi(self): for x in (0.01+100*np.random.random(50)): scipy_psi = special.psi(x) my_psi = psi(x) assert_almost_equal(scipy_psi, my_psi) nipy-0.6.1/nipy/labs/utils/tests/test_repro.py000066400000000000000000000073121470056100100214340ustar00rootroot00000000000000# emacs: -*- mode: 
python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the reproducibility measures. Note that the tests only check that the outputs fall in plausible ranges, not that the values are exact """ import numpy as np from ..reproducibility_measures import ( cluster_reproducibility, peak_reproducibility, voxel_reproducibility, ) from ..simul_multisubject_fmri_dataset import surrogate_2d_dataset def make_dataset(ampli_factor=1.0, n_subj=10): """ Generate a standard multi-subject dataset as a set of 2D activation maps; if ampli_factor is 0, no activation is added """ shape = (40, 40) pos = 2 * np.array([[ 6, 7], [10, 10], [15, 10]]) ampli = ampli_factor * np.array([5, 6, 7]) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=5.0, seed=1) return dataset def apply_repro_analysis(dataset, thresholds=[3.0], method='crfx'): """ Perform the reproducibility analysis with the requested method and thresholds """ from nipy.labs.spatial_models.discrete_domain import grid_domain_from_binary_array n_subj, dimx, dimy = dataset.shape func = np.reshape(dataset, (n_subj, dimx * dimy)).T var = np.ones((dimx * dimy, n_subj)) domain = grid_domain_from_binary_array(np.ones((dimx, dimy, 1))) ngroups = 5 sigma = 2.0 csize = 10 niter = 5 verbose = 0 swap = False kap, clt, pkd = [], [], [] for threshold in thresholds: kappa, cls, pks = [], [], [] kwargs = {'threshold': threshold, 'csize': csize} for i in range(niter): k = voxel_reproducibility(func, var, domain, ngroups, method, swap, verbose, **kwargs) kappa.append(k) cld = cluster_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) cls.append(cld) pk = peak_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) pks.append(pk) kap.append(np.array(kappa)) clt.append(np.array(cls)) pkd.append(np.array(pks)) kap = np.array(kap) clt = np.array(clt) pkd = np.array(pkd) return kap, clt, pkd def test_repro1(): """ Test on the kappa values for a standard dataset using bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset) assert ((kap.mean() > 0.3) & (kap.mean() < 0.9)) assert (pks.mean() > 0.4) def test_repro2(): """ Test on the cluster reproducibility values for a standard dataset using cluster-level rfx, bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset, thresholds=[5.0]) assert (clt.mean() > 0.5) def test_repro3(): """ Test on the kappa values for a null dataset using cluster-level rfx, bootstrap """ dataset = make_dataset(ampli_factor=0) kap, clt, pks = apply_repro_analysis(dataset, thresholds=[4.0]) assert (kap.mean(1) < 0.3) assert (clt.mean(1) < 0.3) def test_repro5(): """ Test on the kappa values for a non-null dataset using cluster-level mfx, bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset, method='cmfx') assert (kap.mean(1) > 0.5) assert (clt.mean(1) > 0.5) def test_repro7(): """ Test on the kappa values for a standard dataset using jackknife subsampling """ dataset = make_dataset(n_subj=101) kap, clt, pks = apply_repro_analysis(dataset, thresholds=[5.0]) assert (kap.mean() > 0.4) assert (clt.mean() > 0.5) nipy-0.6.1/nipy/labs/utils/tests/test_simul_multisubject_fmri_dataset.py000066400000000000000000000104051470056100100267470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test surrogate data generation.
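The checks below exercise the 2D, 3D and 4D generators.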
""" import numpy as np from nibabel import Nifti1Image from ..simul_multisubject_fmri_dataset import ( surrogate_2d_dataset, surrogate_3d_dataset, surrogate_4d_dataset, ) def test_surrogate_array(): """ Check that with no noise, the surrogate activation correspond to the ones that we specify. 2D version """ # We can't use random positions, as the positions have to be # far-enough not to overlap. pos = np.array([[ 2, 10], [10, 4], [19, 15], [15, 19], [5, 18]]) ampli = np.random.random(5) data = surrogate_2d_dataset(n_subj=1, noise_level=0, spatial_jitter=0, signal_jitter=0, pos=pos, shape=(20,20), ampli=ampli).squeeze() x, y = pos.T np.testing.assert_array_equal(data[x, y], ampli) def test_surrogate_array_3d(): """ Check that with no noise, the surrogate activation correspond to the ones that we specify. 3D version """ # We can't use random positions, as the positions have to be # far-enough not to overlap. pos = np.array([[ 2, 10, 2], [10, 4, 4], [18, 13, 18], [13, 18, 5], [5, 18, 18]]) ampli = np.random.random(5) data = surrogate_3d_dataset(n_subj=1, noise_level=0, spatial_jitter=0, signal_jitter=0, pos=pos, shape=(20,20,20), ampli=ampli).squeeze() x, y, z = pos.T np.testing.assert_array_equal(data[x, y, z], ampli) def test_surrogate_array_3d_write(): """ Check that 3D version spits files when required """ from os import path from tempfile import mkdtemp write_path = path.join(mkdtemp(), 'img.nii') shape = (5, 6, 7) data = surrogate_3d_dataset(shape=shape, out_image_file=write_path) assert path.isfile(write_path) def test_surrogate_array_3d_mask(): """ Check that 3D version works when a mask is provided """ shape = (5, 6, 7) mask = np.random.rand(*shape) > 0.5 mask_img = Nifti1Image(mask.astype(np.uint8), np.eye(4)) img = surrogate_3d_dataset(mask=mask_img) mean_image = img[mask].mean() assert (img[mask == 0] == 0).all() def test_surrogate_array_4d_shape(): """Run the 4D datageneration; check the output shape and length """ shape = (5, 6, 7) out_shape = shape + (1,) imgs = surrogate_4d_dataset(shape) assert not np.any(np.asarray(imgs[0].shape) - np.asarray(out_shape)) n_sess = 3 imgs = surrogate_4d_dataset(shape, n_sess=n_sess) assert imgs[0].shape == out_shape assert len(imgs) == n_sess n_scans = 5 out_shape = shape + (n_scans,) imgs = surrogate_4d_dataset(shape, n_scans=n_scans) assert imgs[0].shape == (out_shape) def test_surrogate_array_4d_write(): """Run the 4D data generation; check that output images are written """ from os import path from tempfile import mkdtemp n_sess = 3 write_paths = [path.join(mkdtemp(), 'img_%d.nii' % i) for i in range(n_sess)] shape = (5, 6, 7) imgs = surrogate_4d_dataset(shape, out_image_file=write_paths[0]) assert path.isfile(write_paths[0]) imgs = surrogate_4d_dataset(shape, n_sess=n_sess, out_image_file=write_paths) for wp in write_paths: assert path.isfile(wp) def test_surrogate_array_4d_mask(): """Run the 4D version, with masking """ shape = (5, 5, 5) mask = np.random.rand(*shape) > 0.5 mask_img = Nifti1Image(mask.astype(np.uint8), np.eye(4)) imgs = surrogate_4d_dataset(mask=mask_img) mean_image = imgs[0].get_fdata()[mask].mean() assert (imgs[0].get_fdata()[mask == 0] < mean_image / 2).all() def test_surrogate_array_4d_dmtx(): """Run the 4D version, with design_matrix provided """ shape = (5, 5, 5) n_scans = 25 out_shape = shape + (n_scans,) dmtx = np.random.randn(n_scans, 3) imgs = surrogate_4d_dataset(shape, dmtx=dmtx) assert not np.any(np.asarray(imgs[0].shape) - np.asarray(out_shape)) 
nipy-0.6.1/nipy/labs/utils/zscore.py000066400000000000000000000005601470056100100174070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats TINY = 1e-15 def zscore(pvalue): """ Return the z-score corresponding to a given p-value. """ pvalue = np.minimum(np.maximum(pvalue, TINY), 1. - TINY) z = scipy.stats.norm.isf(pvalue) return z nipy-0.6.1/nipy/labs/viz.py000066400000000000000000000006321470056100100155520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Entry point for doing 2D visualization with NiPy. """ from .viz_tools import cm from .viz_tools.activation_maps import demo_plot_map, plot_anat, plot_map from .viz_tools.anat_cache import mni_sform # XXX: These should die from .viz_tools.coord_tools import coord_transform, find_cut_coords nipy-0.6.1/nipy/labs/viz3d.py000066400000000000000000000004171470056100100160020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D plotting of neuroimaging volumes. """ from .viz_tools.maps_3d import ( affine_img_src, demo_plot_map_3d, plot_anat_3d, plot_map_3d, ) nipy-0.6.1/nipy/labs/viz_tools/000077500000000000000000000000001470056100100164175ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/viz_tools/__init__.py000066400000000000000000000000001470056100100205160ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/viz_tools/activation_maps.py000066400000000000000000000361611470056100100221610ustar00rootroot00000000000000""" Functions to do automatic visualization of activation-like maps. For 2D-only visualization, only matplotlib is required. For 3D visualization, Mayavi, version 3.0 or greater, is required. For a demo, see the 'demo_plot_map' function. """ # Author: Gael Varoquaux # License: BSD # Standard library imports import numbers import warnings import matplotlib.pyplot as plt # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from nipy.utils import is_numlike from .anat_cache import _AnatCache, mni_sform, mni_sform_inv from .coord_tools import coord_transform, find_maxsep_cut_coords from .edge_detect import _fast_abs_percentile from .slicers import SLICERS, _xyz_order ################################################################################ # Helper functions for 2D plotting of activation maps ################################################################################ def plot_map(map, affine, cut_coords=None, anat=None, anat_affine=None, slicer='ortho', figure=None, axes=None, title=None, threshold=None, annotate=True, draw_cross=True, do3d=False, threshold_3d=None, view_3d=(38.5, 70.5, 300, (-2.7, -12, 9.1)), black_bg=False, **imshow_kwargs): """ Plot three cuts of a given activation map (Frontal, Axial, and Lateral) Parameters ---------- map : 3D ndarray The activation map, as a 3D image. affine : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: None, int, or a tuple of floats The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. If slicer is 'ortho', this should be a 3-tuple: (x, y, z) For slicer == 'x', 'y', or 'z', then these are the coordinates of each cut in the corresponding direction. 
            If None or an int is given, then a maximally separated
            sequence (with exactly cut_coords elements if cut_coords is
            not None) of cut coordinates along the slicer axis is
            computed automatically.
        anat : 3D ndarray or False, optional
            The anatomical image to be used as a background. If None, the
            MNI152 T1 1mm template is used. If False, no anat is displayed.
        anat_affine : 4x4 ndarray, optional
            The affine matrix going from the anatomical image voxel space to
            MNI space. This parameter is not used when the default
            anatomical is used, but it is compulsory when using an
            explicit anatomical image.
        slicer: {'ortho', 'x', 'y', 'z'}
            Choose the direction of the cuts. With 'ortho' three cuts are
            performed in orthogonal directions
        figure : integer or matplotlib figure, optional
            Matplotlib figure used or its number. If None is given, a
            new figure is created.
        axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
            The axes, or the coordinates, in matplotlib figure space,
            of the axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        threshold : a number, None, or 'auto'
            If None is given, the maps are not thresholded.
            If a number is given, it is used to threshold the maps:
            values below the threshold are plotted as transparent. If
            'auto' is given, the threshold is determined automatically
            by analysis of the map.
        annotate: boolean, optional
            If annotate is True, positions and left/right annotation
            are added to the plot.
        draw_cross: boolean, optional
            If draw_cross is True, a cross is drawn on the plot to
            indicate the cut position.
        do3d: {True, False or 'interactive'}, optional
            If True, Mayavi is used to plot a 3D view of the
            map in addition to the slicing. If 'interactive', the
            3D visualization is displayed in an additional interactive
            window.
        threshold_3d:
            The threshold to use for the 3D view (if any). Defaults to
            the same threshold as that used for the 2D view.
        view_3d: tuple,
            The view used to take the screenshot: azimuth, elevation,
            distance and focalpoint, see the docstring of mlab.view.
        black_bg: boolean, optional
            If True, the background of the image is set to be black. If
            you wish to save figures with a black background, you
            will need to pass "facecolor='k', edgecolor='k'" to pyplot's
            savefig.
        imshow_kwargs: extra keyword arguments, optional
            Extra keyword arguments passed to pyplot.imshow

        Notes
        -----
        Arrays should be passed in numpy convention: (x, y, z)
        ordered.
Use masked arrays to create transparency: import numpy as np map = np.ma.masked_less(map, 0.5) plot_map(map, affine) """ map, affine = _xyz_order(map, affine) nan_mask = np.isnan(np.asarray(map)) if np.any(nan_mask): map = map.copy() map[nan_mask] = 0 del nan_mask # Deal with automatic settings of plot parameters if threshold == 'auto': # Threshold epsilon above a percentile value, to be sure that some # voxels are indeed threshold threshold = _fast_abs_percentile(map) + 1e-5 if do3d: try: try: from mayavi import version except ImportError: from enthought.mayavi import version if not int(version.version[0]) > 2: raise ImportError except ImportError: warnings.warn('Mayavi > 3.x not installed, plotting only 2D') do3d = False if (cut_coords is None or isinstance(cut_coords, numbers.Number) ) and slicer in ['x', 'y', 'z']: cut_coords = find_maxsep_cut_coords(map, affine, slicer=slicer, threshold=threshold, n_cuts=cut_coords) slicer = SLICERS[slicer].init_with_figure(data=map, affine=affine, threshold=threshold, cut_coords=cut_coords, figure=figure, axes=axes, black_bg=black_bg, leave_space=do3d) # Use Mayavi for the 3D plotting if do3d: from .maps_3d import m2screenshot, plot_map_3d try: from tvtk.api import tvtk except ImportError: from enthought.tvtk.api import tvtk version = tvtk.Version() offscreen = True if (version.vtk_major_version, version.vtk_minor_version) < (5, 2): offscreen = False if do3d == 'interactive': offscreen = False cmap = imshow_kwargs.get('cmap', plt.get_cmap(plt.rcParams['image.cmap'])) # Computing vmin and vmax is costly in time, and is needed # later, so we compute them now, and store them for future # use vmin = imshow_kwargs.get('vmin', map.min()) imshow_kwargs['vmin'] = vmin vmax = imshow_kwargs.get('vmax', map.max()) imshow_kwargs['vmax'] = vmax try: from mayavi import mlab except ImportError: from enthought.mayavi import mlab if threshold_3d is None: threshold_3d = threshold plot_map_3d(np.asarray(map), affine, cut_coords=cut_coords, anat=anat, anat_affine=anat_affine, offscreen=offscreen, cmap=cmap, threshold=threshold_3d, view=view_3d, vmin=vmin, vmax=vmax) ax = list(slicer.axes.values())[0].ax.figure.add_axes((0.001, 0, 0.29, 1)) ax.axis('off') m2screenshot(mpl_axes=ax) if offscreen: # Clean up, so that the offscreen engine doesn't become the # default mlab.clf() engine = mlab.get_engine() try: from mayavi.core.registry import registry except: from enthought.mayavi.core.registry import registry for key, value in registry.engines.items(): if value is engine: registry.engines.pop(key) break if threshold: map = np.ma.masked_inside(map, -threshold, threshold, copy=False) _plot_anat(slicer, anat, anat_affine, title=title, annotate=annotate, draw_cross=draw_cross) slicer.plot_map(map, affine, **imshow_kwargs) return slicer def _plot_anat(slicer, anat, anat_affine, title=None, annotate=True, draw_cross=True, dim=False, cmap=plt.cm.gray, **imshow_kwargs): """ Internal function used to plot anatomy """ canonical_anat = False if anat is None: try: anat, anat_affine, vmax_anat = _AnatCache.get_anat() canonical_anat = True except OSError as e: anat = False warnings.warn(repr(e)) black_bg = slicer._black_bg # XXX: Check that we should indeed plot an anat: we have one, and the # cut_coords are in its range if anat is not False: if canonical_anat: # We special-case the 'canonical anat', as we don't need # to do a few transforms to it. 
            vmin = 0
            vmax = vmax_anat
        elif dim:
            vmin = anat.min()
            vmax = anat.max()
        else:
            vmin = None
            vmax = None
        anat, anat_affine = _xyz_order(anat, anat_affine)
        if dim:
            vmean = .5*(vmin + vmax)
            ptp = .5*(vmax - vmin)
            if not is_numlike(dim):
                dim = .6
            if black_bg:
                vmax = vmean + (1+dim)*ptp
            else:
                vmin = vmean - (1+dim)*ptp
        slicer.plot_map(anat, anat_affine, cmap=cmap, vmin=vmin, vmax=vmax,
                        **imshow_kwargs)
    if annotate:
        slicer.annotate()
    if draw_cross:
        slicer.draw_cross()

    if black_bg:
        # To have a black background in PDF, we need to create a
        # patch in black for the background
        for ax in slicer.axes.values():
            ax.ax.imshow(np.zeros((2, 2, 3)),
                         extent=[-5000, 5000, -5000, 5000],
                         zorder=-500)

    if title is not None and title != '':
        slicer.title(title)
    return slicer


def plot_anat(anat=None, anat_affine=None, cut_coords=None, slicer='ortho',
              figure=None, axes=None, title=None, annotate=True,
              draw_cross=True, black_bg=False, dim=False, cmap=plt.cm.gray,
              **imshow_kwargs):
    """ Plot three cuts of an anatomical image (Frontal, Axial, and Lateral)

        Parameters
        ----------
        anat : 3D ndarray, optional
            The anatomical image to be used as a background. If None is
            given, nipy tries to find a T1 template.
        anat_affine : 4x4 ndarray, optional
            The affine matrix going from the anatomical image voxel space to
            MNI space. This parameter is not used when the default
            anatomical is used, but it is compulsory when using an
            explicit anatomical image.
        cut_coords: None, or a tuple of floats
            The MNI coordinates of the point where the cut is performed, in
            MNI coordinates and order.
            If slicer is 'ortho', this should be a 3-tuple: (x, y, z)
            For slicer == 'x', 'y', or 'z', then these are the
            coordinates of each cut in the corresponding direction.
            If None is given, the cut coordinates are calculated
            automatically.
        slicer: {'ortho', 'x', 'y', 'z'}
            Choose the direction of the cuts. With 'ortho' three cuts are
            performed in orthogonal directions
        figure : integer or matplotlib figure, optional
            Matplotlib figure used or its number. If None is given, a
            new figure is created.
        axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
            The axes, or the coordinates, in matplotlib figure space,
            of the axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
        annotate: boolean, optional
            If annotate is True, positions and left/right annotation
            are added to the plot.
        draw_cross: boolean, optional
            If draw_cross is True, a cross is drawn on the plot to
            indicate the cut position.
        black_bg: boolean, optional
            If True, the background of the image is set to be black. If
            you wish to save figures with a black background, you
            will need to pass "facecolor='k', edgecolor='k'" to pyplot's
            savefig.
        dim: float, optional
            If set, dim the anatomical image, such that
            vmax = vmean + (1+dim)*ptp if black_bg is set to True, or
            vmin = vmean - (1+dim)*ptp otherwise, where
            ptp = .5*(vmax - vmin)
        cmap: matplotlib colormap, optional
            The colormap for the anat
        imshow_kwargs: extra keyword arguments, optional
            Extra keyword arguments passed to pyplot.imshow

        Notes
        -----
        Arrays should be passed in numpy convention: (x, y, z)
        ordered.
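
        A minimal call, as a sketch (it assumes the optional template
        data are installed, so that a default anatomy can be found):

        from nipy.labs.viz import plot_anat
        plot_anat(title='MNI template')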
""" slicer = SLICERS[slicer].init_with_figure(data=anat, affine=anat_affine, threshold=0, cut_coords=cut_coords, figure=figure, axes=axes, black_bg=black_bg) _plot_anat(slicer, anat, anat_affine, title=title, annotate=annotate, draw_cross=draw_cross, dim=dim, cmap=cmap, **imshow_kwargs) return slicer def demo_plot_map(do3d=False, **kwargs): """ Demo activation map plotting. """ map = np.zeros((182, 218, 182)) # Color a asymmetric rectangle around Broca area: x, y, z = -52, 10, 22 mapped = coord_transform(x, y, z, mni_sform_inv) x_map, y_map, z_map = (int(v) for v in mapped) # Compare to values obtained using fslview. We need to add one as # voxels do not start at 0 in fslview. assert x_map == 142 assert y_map + 1 == 137 assert z_map + 1 == 95 map[x_map-5:x_map+5, y_map-3:y_map+3, z_map-10:z_map+10] = 1 return plot_map(map, mni_sform, threshold='auto', title="Broca's area", do3d=do3d, **kwargs) nipy-0.6.1/nipy/labs/viz_tools/anat_cache.py000066400000000000000000000065251470056100100210470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D visualization of activation maps using Mayavi """ # Author: Gael Varoquaux # License: BSD # Standard library imports import os # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from nibabel import load from scipy import ndimage from nipy.io.nibcompat import get_affine # The sform for MNI templates mni_sform = np.array([[-1, 0, 0, 90], [ 0, 1, 0, -126], [ 0, 0, 1, -72], [ 0, 0, 0, 1]]) mni_sform_inv = np.linalg.inv(mni_sform) def find_mni_template(): """ Try to find an MNI template on the disk. """ from nipy.utils import DataError, templates try: filename = templates.get_filename( 'ICBM152', '1mm', 'T1_brain.nii.gz') if os.path.exists(filename): return filename except DataError: pass possible_paths = [ ('', 'usr', 'share', 'fsl', 'data', 'standard', 'avg152T1_brain.nii.gz'), ('', 'usr', 'share', 'data', 'fsl-mni152-templates', 'avg152T1_brain.nii.gz'), ('', 'usr', 'local', 'share', 'fsl', 'data', 'standard', 'avg152T1_brain.nii.gz'), ] if 'FSLDIR' in os.environ: fsl_path = os.environ['FSLDIR'].split(os.sep) fsl_path.extend(('data', 'standard', 'avg152T1_brain.nii.gz')) possible_paths.append(fsl_path) for path in possible_paths: filename = os.sep.join(path) if os.path.exists(filename): return filename ################################################################################ # Caching of the MNI template. ################################################################################ class _AnatCache: """ Class to store the anat array in cache, to avoid reloading it each time. 
""" anat = None anat_sform = None blurred = None @classmethod def get_anat(cls): filename = find_mni_template() if cls.anat is None: if filename is None: raise OSError('Cannot find template file T1_brain.nii.gz ' 'required to plot anatomy, see the nipy documentation ' 'installaton section for how to install template files.') anat_im = load(filename) anat = anat_im.get_fdata() anat = anat.astype(np.float64) anat_mask = ndimage.morphology.binary_fill_holes(anat > 0) anat = np.ma.masked_array(anat, np.logical_not(anat_mask)) cls.anat_sform = get_affine(anat_im) cls.anat = anat cls.anat_max = anat.max() return cls.anat, cls.anat_sform, cls.anat_max @classmethod def get_blurred(cls): if cls.blurred is not None: return cls.blurred anat, _, _ = cls.get_anat() cls.blurred = ndimage.gaussian_filter( (ndimage.morphology.binary_fill_holes( ndimage.gaussian_filter( (anat > 4800).astype(np.float64), 6) > 0.5 )).astype(np.float64), 2).T.ravel() return cls.blurred nipy-0.6.1/nipy/labs/viz_tools/cm.py000066400000000000000000000226221470056100100173740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Matplotlib colormaps useful for neuroimaging. """ import numpy as _np from matplotlib import cm as _cm from matplotlib import colors as _colors from matplotlib import rcParams as _rc ################################################################################ # Custom colormaps for two-tailed symmetric statistics ################################################################################ ################################################################################ # Helper functions def _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')): """ Utility function to swap the colors of a colormap. """ orig_cdict = cmap._segmentdata.copy() cdict = {} cdict['green'] = list(orig_cdict[swap_order[0]]) cdict['blue'] = list(orig_cdict[swap_order[1]]) cdict['red'] = list(orig_cdict[swap_order[2]]) return cdict def _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')): """ Utility function to make a new colormap by concatenating a colormap with its reverse. """ orig_cdict = cmap._segmentdata.copy() cdict = {} cdict['green'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])] cdict['blue'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])] cdict['red'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])] for color in ('red', 'green', 'blue'): cdict[color].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in orig_cdict[color]]) return cdict def _concat_cmap(cmap1, cmap2): """ Utility function to make a new colormap by concatenating two colormaps. 
""" cdict = {} cdict1 = cmap1._segmentdata.copy() cdict2 = cmap2._segmentdata.copy() if not hasattr(cdict1['red'], '__call__'): for c in ['red', 'green', 'blue']: cdict[c] = [(0.5*p, c1, c2) for (p, c1, c2) in cdict1[c]] else: for c in ['red', 'green', 'blue']: cdict[c] = [] ps = _np.linspace(0, 1, 10) colors = cmap1(ps) for p, (r, g, b, a) in zip(ps, colors): cdict['red'].append((.5*p, r, r)) cdict['green'].append((.5*p, g, g)) cdict['blue'].append((.5*p, b, b)) if not hasattr(cdict2['red'], '__call__'): for c in ['red', 'green', 'blue']: cdict[c].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in cdict2[c]]) else: ps = _np.linspace(0, 1, 10) colors = cmap2(ps) for p, (r, g, b, a) in zip(ps, colors): cdict['red'].append((.5*(1+p), r, r)) cdict['green'].append((.5*(1+p), g, g)) cdict['blue'].append((.5*(1+p), b, b)) return cdict def alpha_cmap(color, name=''): """ Return a colormap with the given color, and alpha going from zero to 1. Parameters ---------- color: (r, g, b), or a string A triplet of floats ranging from 0 to 1, or a matplotlib color string """ red, green, blue = _colors.colorConverter.to_rgb(color) if name == '' and hasattr(color, 'startswith'): name = color cmapspec = [(red, green, blue, 0.), (red, green, blue, 1.), ] cmap = _colors.LinearSegmentedColormap.from_list( f'{name}_transparent', cmapspec, _rc['image.lut']) cmap._init() cmap._lut[:, -1] = _np.linspace(.5, 1.0, cmap._lut.shape[0]) cmap._lut[-1, -1] = 0 return cmap ################################################################################ # Our colormaps definition _cmaps_data = { 'cold_hot': _pigtailed_cmap(_cm.hot), 'brown_blue': _pigtailed_cmap(_cm.bone), 'cyan_copper': _pigtailed_cmap(_cm.copper), 'cyan_orange': _pigtailed_cmap(_cm.YlOrBr_r), 'blue_red': _pigtailed_cmap(_cm.Reds_r), 'brown_cyan': _pigtailed_cmap(_cm.Blues_r), 'purple_green': _pigtailed_cmap(_cm.Greens_r, swap_order=('red', 'blue', 'green')), 'purple_blue': _pigtailed_cmap(_cm.Blues_r, swap_order=('red', 'blue', 'green')), 'blue_orange': _pigtailed_cmap(_cm.Oranges_r, swap_order=('green', 'red', 'blue')), 'black_blue': _rotate_cmap(_cm.hot), 'black_purple': _rotate_cmap(_cm.hot, swap_order=('blue', 'red', 'green')), 'black_pink': _rotate_cmap(_cm.hot, swap_order=('blue', 'green', 'red')), 'black_green': _rotate_cmap(_cm.hot, swap_order=('red', 'blue', 'green')), 'black_red': _cm.hot._segmentdata.copy(), } if hasattr(_cm, 'ocean'): # MPL 0.99 doesn't have Ocean _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r) if hasattr(_cm, 'afmhot'): # or afmhot _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r) _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone) ################################################################################ # Build colormaps and their reverse. 
_cmap_d = {}
for _cmapname in list(_cmaps_data):
    _cmapname_r = _cmapname + '_r'
    _cmapspec = _cmaps_data[_cmapname]
    if 'red' in _cmapspec:
        _cmap_d[_cmapname] = _colors.LinearSegmentedColormap(
            _cmapname, _cmapspec, _rc['image.lut'])
        _cmap_d[_cmapname_r] = _cmap_d[_cmapname].reversed(name=_cmapname_r)
    else:
        _revspec = list(reversed(_cmapspec))
        if len(_revspec[0]) == 2:    # e.g., (1, (1.0, 0.0, 1.0))
            _revspec = [(1.0 - a, b) for a, b in _revspec]
        _cmap_d[_cmapname] = _colors.LinearSegmentedColormap.from_list(
            _cmapname, _cmapspec, _rc['image.lut'])
        _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap.from_list(
            _cmapname_r, _revspec, _rc['image.lut'])

################################################################################
# A few transparent colormaps
for color, name in (((1, 0, 0), 'red'),
                    ((0, 1, 0), 'green'),
                    ((0, 0, 1), 'blue'),
                   ):
    _cmap_d[f'{name}_transparent'] = alpha_cmap(color, name=name)

locals().update(_cmap_d)


################################################################################
# Utility to replace a colormap by another in an interval
################################################################################

def dim_cmap(cmap, factor=.3, to_white=True):
    """ Dim a colormap to white, or to black.
    """
    if not 0 <= factor <= 1:
        raise ValueError(
            f'Dimming factor must be larger than 0 and smaller than 1, '
            f'{factor} was passed.')
    if to_white:
        dimmer = lambda c: 1 - factor*(1-c)
    else:
        dimmer = lambda c: factor*c
    cdict = cmap._segmentdata.copy()
    for c_index, color in enumerate(('red', 'green', 'blue')):
        color_lst = []
        for value, c1, c2 in cdict[color]:
            color_lst.append((value, dimmer(c1), dimmer(c2)))
        cdict[color] = color_lst

    return _colors.LinearSegmentedColormap(
                f'{cmap.name}_dimmed',
                cdict,
                _rc['image.lut'])


def replace_inside(outer_cmap, inner_cmap, vmin, vmax):
    """ Replace a colormap by another inside a pair of values.
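
        For instance, replace_inside(cold_hot, _cm.gray, .4, .6) would
        use gray colors in the middle fifth of the colorbar (purely
        illustrative values); vmin and vmax must satisfy
        0 <= vmin < vmax <= 1.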
""" assert vmin < vmax, ValueError('vmin must be smaller than vmax') assert vmin >= 0, ValueError(f'vmin must be larger than 0, {vmin} was passed.') assert vmax <= 1, ValueError(f'vmax must be smaller than 1, {vmax} was passed.') outer_cdict = outer_cmap._segmentdata.copy() inner_cdict = inner_cmap._segmentdata.copy() cdict = {} for this_cdict, cmap in [(outer_cdict, outer_cmap), (inner_cdict, inner_cmap)]: if hasattr(this_cdict['red'], '__call__'): ps = _np.linspace(0, 1, 25) colors = cmap(ps) this_cdict['red'] = [] this_cdict['green'] = [] this_cdict['blue'] = [] for p, (r, g, b, a) in zip(ps, colors): this_cdict['red'].append((p, r, r)) this_cdict['green'].append((p, g, g)) this_cdict['blue'].append((p, b, b)) for c_index, color in enumerate(('red', 'green', 'blue')): color_lst = [] for value, c1, c2 in outer_cdict[color]: if value >= vmin: break color_lst.append((value, c1, c2)) color_lst.append((vmin, outer_cmap(vmin)[c_index], inner_cmap(vmin)[c_index])) for value, c1, c2 in inner_cdict[color]: if value <= vmin: continue if value >= vmax: break color_lst.append((value, c1, c2)) color_lst.append((vmax, inner_cmap(vmax)[c_index], outer_cmap(vmax)[c_index])) for value, c1, c2 in outer_cdict[color]: if value <= vmax: continue color_lst.append((value, c1, c2)) cdict[color] = color_lst return _colors.LinearSegmentedColormap( f'{inner_cmap.name}_inside_{outer_cmap.name}', cdict, _rc['image.lut']) nipy-0.6.1/nipy/labs/viz_tools/coord_tools.py000066400000000000000000000211521470056100100213200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Misc tools to find activations and cut on maps """ # Author: Gael Varoquaux # License: BSD import warnings # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import ndimage, stats from ..datasets.transforms.affine_utils import get_bounds # Local imports from ..mask import largest_cc ################################################################################ # Functions for automatic choice of cuts coordinates ################################################################################ def coord_transform(x, y, z, affine): """ Convert x, y, z coordinates from one image space to another space. Warning: x, y and z have Talairach ordering, not 3D numpy image ordering. Parameters ---------- x : number or ndarray The x coordinates in the input space y : number or ndarray The y coordinates in the input space z : number or ndarray The z coordinates in the input space affine : 2D 4x4 ndarray affine that maps from input to output space. Returns ------- x : number or ndarray The x coordinates in the output space y : number or ndarray The y coordinates in the output space z : number or ndarray The z coordinates in the output space """ coords = np.c_[np.atleast_1d(x).flat, np.atleast_1d(y).flat, np.atleast_1d(z).flat, np.ones_like(np.atleast_1d(z).flat)].T x, y, z, _ = np.dot(affine, coords) return x.squeeze(), y.squeeze(), z.squeeze() def find_cut_coords(map, mask=None, activation_threshold=None): """ Find the center of the largest activation connect component. Parameters ----------- map : 3D ndarray The activation map, as a 3D image. mask : 3D ndarray, boolean, optional An optional brain mask. activation_threshold : float, optional The lower threshold to the positive activation. If None, the activation threshold is computed using find_activation. 
Returns ------- x: float the x coordinate in voxels. y: float the y coordinate in voxels. z: float the z coordinate in voxels. """ # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) # Deal with masked arrays: if hasattr(map, 'mask'): not_mask = np.logical_not(map.mask) if mask is None: mask = not_mask else: mask *= not_mask map = np.asarray(map) my_map = map.copy() if mask is not None: slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # Testing min and max is faster than np.all(my_map == 0) if (my_map.max() == 0) and (my_map.min() == 0): return .5*np.array(map.shape) if activation_threshold is None: activation_threshold = stats.scoreatpercentile( np.abs(my_map[my_map !=0]).ravel(), 80) mask = np.abs(my_map) > activation_threshold-1.e-15 mask = largest_cc(mask) slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # For the second threshold, we use a mean, as it is much faster, # although it is less robust second_threshold = np.abs(np.mean(my_map[mask])) second_mask = (np.abs(my_map)>second_threshold) if second_mask.sum() > 50: my_map *= largest_cc(second_mask) cut_coords = ndimage.center_of_mass(np.abs(my_map)) return cut_coords + offset ################################################################################ def get_mask_bounds(mask, affine): """ Return the world-space bounds occupied by a mask given an affine. Notes ----- The mask should have only one connect component. The affine should be diagonal or diagonal-permuted. """ (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine) slices = ndimage.find_objects(mask) if len(slices) == 0: warnings.warn("empty mask", stacklevel=2) else: x_slice, y_slice, z_slice = slices[0] x_width, y_width, z_width = mask.shape xmin, xmax = (xmin + x_slice.start*(xmax - xmin)/x_width, xmin + x_slice.stop *(xmax - xmin)/x_width) ymin, ymax = (ymin + y_slice.start*(ymax - ymin)/y_width, ymin + y_slice.stop *(ymax - ymin)/y_width) zmin, zmax = (zmin + z_slice.start*(zmax - zmin)/z_width, zmin + z_slice.stop *(zmax - zmin)/z_width) return xmin, xmax, ymin, ymax, zmin, zmax def _maximally_separated_subset(x, k): """ Given a set of n points x = {x_1, x_2, ..., x_n} and a positive integer k < n, this function returns a subset of k points which are maximally spaced. Returns ------- msssk: 1D array of k floats computed maximally-separated subset of k elements from x """ # base cases if k < 1: raise ValueError("k = %i < 1 is senseless." 
                         % k)
    if k == 1:
        return [x[len(x) // 2]]

    # would-be maximally separated subset of k (not showing the terminal nodes)
    msss = list(range(1, len(x) - 1))

    # sorting is necessary for the heuristic to work
    x = np.sort(x)

    # iteratively delete points x_j of msss, for which x_(j + 1) - x_(j - 1) is
    # smallest, until only k - 2 points survive
    while len(msss) + 2 > k:
        # survivors
        y = np.array([x[0]] + list(x[msss]) + [x[-1]])

        # remove most troublesome point
        msss = np.delete(msss, np.argmin(y[2:] - y[:-2]))

    # return maximally separated subset of k elements
    return x[[0] + list(msss) + [len(x) - 1]]


def find_maxsep_cut_coords(map3d, affine, slicer='z', n_cuts=None,
                           threshold=None):
    """ Heuristic to find `n_cuts` cut coordinates with maximal
    separation along a given axis

    Parameters
    ----------
    map3d : 3D array
        the data under consideration
    affine : array shape (4, 4)
        Affine mapping between array coordinates of `map3d` and
        real-world coordinates.
    slicer : string, optional
        sectional slicer; possible values are "x", "y", or "z"
    n_cuts : None or int >= 1, optional
        Number of cuts in the plot; if None, then a default value of 5 is
        forced.
    threshold : None or float, optional
        Thresholding to be applied to the map.  Values less than `threshold`
        set to 0.  If None, no thresholding applied.

    Returns
    -------
    cuts : 1D array of length `n_cuts`
        the computed cuts

    Raises
    ------
    ValueError
        If `slicer` not in 'xyz'
    ValueError
        If `n_cuts` < 1
    """
    if n_cuts is None:
        n_cuts = 5
    if n_cuts < 1:
        raise ValueError("n_cuts = %i < 1 is senseless." % n_cuts)

    # sanitize slicer
    if slicer not in 'xyz':
        raise ValueError(
            f"slicer must be one of 'x', 'y', and 'z', got '{slicer}'.")
    slicer = "xyz".index(slicer)

    # load data
    if map3d.ndim != 3:
        raise TypeError(
            "map3d must be 3D array, got %iD" % map3d.ndim)
    _map3d = np.rollaxis(map3d.copy(), slicer, start=3)
    _map3d = np.abs(_map3d)
    if threshold is not None:
        _map3d[_map3d < threshold] = 0

    # count activated voxels per plane
    n_activated_voxels_per_plane = np.array([(_map3d[..., z] > 0).sum()
                                             for z in range(_map3d.shape[-1])])
    # rank the planes by number of activated voxels, and keep the most
    # activated ones
    perm = np.argsort(n_activated_voxels_per_plane)
    good_planes = perm[::-1][:n_cuts * 4 if n_cuts > 1 else 1]

    # cast into coord space
    good_planes = np.array([
        # map cut coord into native space
        np.dot(affine,
               np.array([0, 0, 0, 1]  # origin
                        ) + coord * np.eye(4)[slicer])[slicer]
        for coord in good_planes])

    # compute cut_coords maximally-separated planes
    return _maximally_separated_subset(good_planes, n_cuts)
nipy-0.6.1/nipy/labs/viz_tools/edge_detect.py000066400000000000000000000120101470056100100212210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Edge detection routines
"""
import warnings

import numpy as np
from scipy import ndimage, signal

try:
    # Protect this import as it is compiled code
    from nipy.algorithms.statistics import quantile
except ImportError as e:
    warnings.warn(f'Could not import fast quantile function: {e}')
    quantile = None


################################################################################
# Edge detection

def _fast_abs_percentile(map, percentile=80):
    """ A fast version of the percentile of the absolute value.
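
        This is roughly equivalent to
        scipy.stats.scoreatpercentile(np.abs(map).ravel(), percentile),
        but uses the compiled quantile routine when it is available.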
""" if hasattr(map, 'mask'): map = np.asarray(map[np.logical_not(map.mask)]) map = np.abs(map) map = map.ravel() if quantile is not None: return quantile(map, .01*percentile) map.sort() nb = map.size return map[int(.01*percentile*nb)] def _orientation_kernel(t): """ structure elements for calculating the value of neighbors in several directions """ sin = np.sin pi = np.pi t = pi*t arr = np.array([[sin(t), sin(t+.5*pi), sin(t+pi) ], [sin(t+1.5*pi), 0, sin(t+1.5*pi)], [sin(t+pi), sin(t+.5*pi), sin(t) ]]) return np.round(.5*(1+arr)**2).astype(np.bool_) def _edge_detect(image, high_threshold=.75, low_threshold=.4): """ Edge detection for 2D images based on Canny filtering. Parameters ========== image: 2D array The image on which edge detection is applied high_threshold: float, optional The quantile defining the upper threshold of the hysteries thresholding: decrease this to keep more edges low_threshold: float, optional The quantile defining the lower threshold of the hysteries thresholding: decrease this to extract wider edges Returns ======== grad_mag: 2D array of floats The magnitude of the gradient edge_mask: 2D array of booleans A mask of where have edges been detected Notes ====== This function is based on a Canny filter, however it has been tailored to visualization purposes on brain images: don't use it in the general case. It computes the norm of the gradient, extracts the ridge by keeping only local maximum in each direction, and performs hysteresis filtering to keep only edges with high gradient magnitude. """ # This code is loosely based on code by Stefan van der Waalt # Convert to floats to avoid overflows np_err = np.seterr(all='ignore') img = signal.wiener(image.astype(np.float64)) np.seterr(**np_err) # Where the noise variance is 0, Wiener can create nans img[np.isnan(img)] = image[np.isnan(img)] img /= img.max() grad_x = ndimage.sobel(img, mode='constant', axis=0) grad_y = ndimage.sobel(img, mode='constant', axis=1) grad_mag = np.sqrt(grad_x**2 + grad_y**2) grad_angle = np.arctan2(grad_y, grad_x) # Scale the angles in the range [0, 2] grad_angle = (grad_angle + np.pi) / np.pi # Non-maximal suppression: an edge pixel is only good if its magnitude is # greater than its neighbors normal to the edge direction. thinner = np.zeros(grad_mag.shape, dtype=np.bool_) for angle in np.arange(0, 2, .25): thinner = thinner | ( (grad_mag > .85*ndimage.maximum_filter(grad_mag, footprint=_orientation_kernel(angle))) & (((grad_angle - angle) % 2) < .75) ) # Remove the edges next to the side of the image: they are not reliable thinner[0] = 0 thinner[-1] = 0 thinner[:, 0] = 0 thinner[:, -1] = 0 thinned_grad = thinner * grad_mag # Hysteresis thresholding: find seeds above a high threshold, then # expand out until we go below the low threshold grad_values = thinned_grad[thinner] high = thinned_grad > _fast_abs_percentile(grad_values, 100*high_threshold) low = thinned_grad > _fast_abs_percentile(grad_values, 100*low_threshold) edge_mask = ndimage.binary_dilation(high, structure=np.ones((3, 3)), iterations=-1, mask=low) return grad_mag, edge_mask def _edge_map(image): """ Return a maps of edges suitable for visualization. Parameters ========== image: 2D array The image that the edges are extracted from. Returns ======== edge_mask: 2D masked array A mask of the edge as a masked array with parts without edges masked and the large extents detected with lower coefficients. 
""" edge_mask = _edge_detect(image)[-1] edge_mask = edge_mask.astype(np.float64) edge_mask = -np.sqrt(ndimage.distance_transform_cdt(edge_mask)) edge_mask[edge_mask != 0] -= -.05+edge_mask.min() edge_mask = np.ma.masked_less(edge_mask, .01) return edge_mask nipy-0.6.1/nipy/labs/viz_tools/maps_3d.py000066400000000000000000000344401470056100100203240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D visualization of activation maps using Mayavi """ # Author: Gael Varoquaux # License: BSD import os import tempfile # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import stats # Local imports from .anat_cache import _AnatCache, mni_sform, mni_sform_inv from .coord_tools import coord_transform # A module global to avoid creating multiple time an offscreen engine. off_screen_engine = None ################################################################################ # Helper functions def affine_img_src(data, affine, scale=1, name='AffineImage', reverse_x=False): """ Make a Mayavi source defined by a 3D array and an affine, for which the voxel of the 3D array are mapped by the affine. Parameters ----------- data: 3D ndarray The data arrays affine: (4 x 4) ndarray The (4 x 4) affine matrix relating voxels to world coordinates. scale: float, optional An optional addition scaling factor. name: string, optional The name of the Mayavi source created. reverse_x: boolean, optional Reverse the x (lateral) axis. Useful to compared with images in radiologic convention. Notes ------ The affine should be diagonal. """ # Late import to avoid triggering wx imports before needed. try: from mayavi.sources.api import ArraySource except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi.sources.api import ArraySource center = np.r_[0, 0, 0, 1] spacing = np.diag(affine)[:3].copy() origin = np.dot(affine, center)[:3] if reverse_x: # Radiologic convention spacing[0] *= -1 origin[0] *= -1 src = ArraySource(scalar_data=np.asarray(data, dtype=np.float64), name=name, spacing=scale*spacing, origin=scale*origin) return src ################################################################################ # Mayavi helpers def autocrop_img(img, bg_color): red, green, blue = bg_color outline = ( (img[..., 0] != red) +(img[..., 1] != green) +(img[..., 2] != blue) ) outline_x = outline.sum(axis=0) outline_y = outline.sum(axis=1) outline_x = np.where(outline_x)[0] outline_y = np.where(outline_y)[0] if len(outline_x) == 0: return img else: x_min = outline_x.min() x_max = outline_x.max() if len(outline_y) == 0: return img else: y_min = outline_y.min() y_max = outline_y.max() return img[y_min:y_max, x_min:x_max] def m2screenshot(mayavi_fig=None, mpl_axes=None, autocrop=True): """ Capture a screeshot of the Mayavi figure and display it in the matplotlib axes. """ import matplotlib.pyplot as plt # Late import to avoid triggering wx imports before needed. try: from mayavi import mlab except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi import mlab if mayavi_fig is None: mayavi_fig = mlab.gcf() else: mlab.figure(mayavi_fig) if mpl_axes is not None: plt.axes(mpl_axes) # XXX: This is a hack to force Mayavi to render. # It should not be needed if a GUI loop is running, # but just to be safe... 
# https://github.com/enthought/mayavi/issues/702 mayavi_fig.scene._lift() image3d = mlab.screenshot(figure=mayavi_fig) if autocrop: bg_color = mayavi_fig.scene.background image3d = autocrop_img(image3d, bg_color) plt.imshow(image3d) plt.axis('off') # XXX: Should switch back to previous MPL axes: we have a side effect # here. ################################################################################ # Anatomy outline ################################################################################ def plot_anat_3d(anat=None, anat_affine=None, scale=1, sulci_opacity=0.5, gyri_opacity=0.3, opacity=None, skull_percentile=78, wm_percentile=79, outline_color=None): """ 3D anatomical display Parameters ---------- skull_percentile : float, optional The percentile of the values in the image that delimit the skull from the outside of the brain. The smaller the fraction of you field of view is occupied by the brain, the larger this value should be. wm_percentile : float, optional The percentile of the values in the image that delimit the white matter from the grey matter. Typical this is skull_percentile + 1 """ # Late import to avoid triggering wx imports before needed. try: from mayavi import mlab except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi import mlab fig = mlab.gcf() disable_render = fig.scene.disable_render fig.scene.disable_render = True if anat is None: anat, anat_affine, anat_max = _AnatCache.get_anat() anat_blurred = _AnatCache.get_blurred() skull_threshold = 4800 inner_threshold = 5000 upper_threshold = 7227.8 else: from scipy import ndimage # XXX: This should be in a separate function voxel_size = np.sqrt((anat_affine[:3, :3]**2).sum()/3.) skull_threshold = stats.scoreatpercentile(anat.ravel(), skull_percentile) inner_threshold = stats.scoreatpercentile(anat.ravel(), wm_percentile) upper_threshold = anat.max() anat_blurred = ndimage.gaussian_filter( (ndimage.morphology.binary_fill_holes( ndimage.gaussian_filter( (anat > skull_threshold).astype(np.float64), 6./voxel_size) > 0.5 )).astype(np.float64), 2./voxel_size).T.ravel() if opacity is None: try: from tvtk.api import tvtk except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.tvtk.api import tvtk version = tvtk.Version() if (version.vtk_major_version, version.vtk_minor_version) < (5, 2): opacity = .99 else: opacity = 1 ########################################################################### # Display the cortical surface (flattenned) anat_src = affine_img_src(anat, anat_affine, scale=scale, name='Anat') anat_src.image_data.point_data.add_array(anat_blurred) anat_src.image_data.point_data.get_array(1).name = 'blurred' anat_src.image_data.point_data.update() anat_blurred = mlab.pipeline.set_active_attribute( anat_src, point_scalars='blurred') anat_blurred.update_pipeline() # anat_blurred = anat_src cortex_surf = mlab.pipeline.set_active_attribute( mlab.pipeline.contour(anat_blurred), point_scalars='scalar') # XXX: the choice in vmin and vmax should be tuned to show the # sulci better cortex = mlab.pipeline.surface(cortex_surf, colormap='copper', opacity=opacity, vmin=skull_threshold, vmax=inner_threshold) cortex.enable_contours = True cortex.contour.filled_contours = True cortex.contour.auto_contours = False cortex.contour.contours = [0, inner_threshold, upper_threshold] #cortex.actor.property.backface_culling = True # XXX: Why do we do 'frontface_culling' to see the front. 
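    # A plausible explanation (an assumption, not verified): the MNI
    # affine has a negative x spacing, which mirrors the geometry and
    # flips the surface normals, so culling the 'front' faces actually
    # removes the near side of the surface.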
cortex.actor.property.frontface_culling = True cortex.actor.mapper.interpolate_scalars_before_mapping = True cortex.actor.property.interpolation = 'flat' # Add opacity variation to the colormap cmap = cortex.module_manager.scalar_lut_manager.lut.table.to_array() cmap[128:, -1] = gyri_opacity*255 cmap[:128, -1] = sulci_opacity*255 cortex.module_manager.scalar_lut_manager.lut.table = cmap if outline_color is not None: outline = mlab.pipeline.iso_surface( anat_blurred, contours=[0.4], color=outline_color, opacity=.9) outline.actor.property.backface_culling = True fig.scene.disable_render = disable_render return cortex ################################################################################ # Maps ################################################################################ def plot_map_3d(map, affine, cut_coords=None, anat=None, anat_affine=None, threshold=None, offscreen=False, vmin=None, vmax=None, cmap=None, view=(38.5, 70.5, 300, (-2.7, -12, 9.1)), ): """ Plot a 3D volume rendering view of the activation, with an outline of the brain. Parameters ---------- map : 3D ndarray The activation map, as a 3D image. affine : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: 3-tuple of floats, optional The MNI coordinates of a 3D cursor to indicate a feature or a cut, in MNI coordinates and order. anat : 3D ndarray, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. If False, no anatomical image is used. anat_affine : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicit anatomical image. threshold : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. offscreen: boolean, optional If True, Mayavi attempts to plot offscreen. Will work only with VTK >= 5.2. vmin : float, optional The minimal value, for the colormap vmax : float, optional The maximum value, for the colormap cmap : a callable, or a pyplot colormap A callable returning a (n, 4) array for n values between 0 and 1 for the colors. This can be for instance a pyplot colormap. Notes ----- If you are using a VTK version below 5.2, there is no way to avoid opening a window during the rendering under Linux. This is necessary to use the graphics card for the rendering. You must maintain this window on top of others and on the screen. """ # Late import to avoid triggering wx imports before needed. 
    try:
        from mayavi import mlab
    except ImportError:
        # Try out old install of Mayavi, with namespace packages
        from enthought.mayavi import mlab
    if offscreen:
        global off_screen_engine
        if off_screen_engine is None:
            try:
                from mayavi.core.off_screen_engine import OffScreenEngine
            except ImportError:
                # Try out old install of Mayavi, with namespace packages
                from enthought.mayavi.core.off_screen_engine import OffScreenEngine
            off_screen_engine = OffScreenEngine()
        off_screen_engine.start()
        fig = mlab.figure('__private_plot_map_3d__',
                          bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
                          size=(400, 330),
                          engine=off_screen_engine)
        mlab.clf(figure=fig)
    else:
        fig = mlab.gcf()
        fig = mlab.figure(fig, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
                          size=(400, 350))
    disable_render = fig.scene.disable_render
    fig.scene.disable_render = True
    if threshold is None:
        threshold = stats.scoreatpercentile(
                np.abs(map).ravel(), 80)
    contours = []
    lower_map = map[map <= -threshold]
    if np.any(lower_map):
        contours.append(lower_map.max())
    upper_map = map[map >= threshold]
    if np.any(upper_map):
        contours.append(upper_map.min())

    ###########################################################################
    # Display the map using iso-surfaces
    if len(contours) > 0:
        map_src = affine_img_src(map, affine)
        module = mlab.pipeline.iso_surface(map_src,
                                           contours=contours,
                                           vmin=vmin, vmax=vmax)
        if hasattr(cmap, '__call__'):
            # Stick the colormap in mayavi
            module.module_manager.scalar_lut_manager.lut.table \
                    = (255*cmap(np.linspace(0, 1, 256))).astype(np.int_)
    else:
        module = None

    if anat is not False:
        plot_anat_3d(anat=anat, anat_affine=anat_affine, scale=1.05,
                     outline_color=(.9, .9, .9),
                     gyri_opacity=.2)

    ###########################################################################
    # Draw the cursor
    if cut_coords is not None:
        x0, y0, z0 = cut_coords
        mlab.plot3d((-90, 90), (y0, y0), (z0, z0),
                    color=(.5, .5, .5), tube_radius=0.25)
        mlab.plot3d((x0, x0), (-126, 91), (z0, z0),
                    color=(.5, .5, .5), tube_radius=0.25)
        mlab.plot3d((x0, x0), (y0, y0), (-72, 109),
                    color=(.5, .5, .5), tube_radius=0.25)

    mlab.view(*view)
    fig.scene.disable_render = disable_render

    return module


def demo_plot_map_3d():
    map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Broca's area:
    x, y, z = -52, 10, 22
    # Cast the world coordinates to integer voxel indices
    x_map, y_map, z_map = (int(v) for v in
                           coord_transform(x, y, z, mni_sform_inv))
    map[x_map-5:x_map+5, y_map-3:y_map+3, z_map-10:z_map+10] = 1
    plot_map_3d(map, mni_sform, cut_coords=(x, y, z))
nipy-0.6.1/nipy/labs/viz_tools/slicers.py000066400000000000000000000632361470056100100204470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The Slicer classes.

The main purpose of these classes is to auto-adjust the axes sizes to
the data, for different layouts of cuts.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import transforms

from nipy.utils import is_iterable

# Local imports
from ..datasets import VolumeImg
from . import cm
from .coord_tools import coord_transform, find_cut_coords, get_bounds, get_mask_bounds
from .edge_detect import _edge_map

################################################################################
# Bugware to have transparency work OK with MPL < .99.1
if mpl.__version__ < '0.99.1':
    # We wrap the lut as a callable and replace its evaluation to put
    # alpha to zero where the mask is true. This is what is done if
    # MPL >= .99.1
    from matplotlib import colors
    class CMapProxy(colors.Colormap):
        def __init__(self, lut):
            self.__lut = lut

        def __call__(self, arr, *args, **kwargs):
            results = self.__lut(arr, *args, **kwargs)
            if not isinstance(arr, np.ma.MaskedArray):
                return results
            else:
                results[arr.mask, -1] = 0
            return results

        def __getattr__(self, attr):
            # Dark magic: we are delegating any call to the lut instance
            # we wrap
            return self.__dict__.get(attr, getattr(self.__lut, attr))


def _xyz_order(map, affine):
    img = VolumeImg(map, affine=affine, world_space='mine')
    img = img.xyz_ordered(resample=True, copy=False)
    map = img.get_fdata()
    affine = img.affine
    return map, affine


################################################################################
# class CutAxes
################################################################################

class CutAxes:
    """ An MPL axis-like object that displays a cut of 3D volumes
    """
    def __init__(self, ax, direction, coord):
        """ An MPL axis-like object that displays a cut of 3D volumes

            Parameters
            ==========
            ax: a MPL axes instance
                The axes in which the plots will be drawn
            direction: {'x', 'y', 'z'}
                The direction of the cut
            coord: float
                The coordinate along the direction of the cut
        """
        self.ax = ax
        self.direction = direction
        self.coord = coord
        self._object_bounds = []

    def do_cut(self, map, affine):
        """ Cut the 3D volume into a 2D slice

            Parameters
            ==========
            map: 3D ndarray
                The 3D volume to cut
            affine: 4x4 ndarray
                The affine of the volume
        """
        coords = [0, 0, 0]
        coords['xyz'.index(self.direction)] = self.coord
        x_map, y_map, z_map = (int(np.round(c)) for c in
                               coord_transform(coords[0],
                                               coords[1],
                                               coords[2],
                                               np.linalg.inv(affine)))
        if self.direction == 'y':
            cut = np.rot90(map[:, y_map, :])
        elif self.direction == 'x':
            cut = np.rot90(map[x_map, :, :])
        elif self.direction == 'z':
            cut = np.rot90(map[:, :, z_map])
        else:
            raise ValueError(f'Invalid value for direction {self.direction}')
        return cut

    def draw_cut(self, cut, data_bounds, bounding_box,
                 type='imshow', **kwargs):
        # kwargs massaging
        kwargs['origin'] = 'upper'
        if mpl.__version__ < '0.99.1':
            cmap = kwargs.get('cmap',
                              plt.cm.cmap_d[plt.rcParams['image.cmap']])
            kwargs['cmap'] = CMapProxy(cmap)

        if self.direction == 'y':
            (xmin, xmax), (_, _), (zmin, zmax) = data_bounds
            (xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box
        elif self.direction == 'x':
            (_, _), (xmin, xmax), (zmin, zmax) = data_bounds
            (_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box
        elif self.direction == 'z':
            (xmin, xmax), (zmin, zmax), (_, _) = data_bounds
            (xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box
        else:
            raise ValueError(f'Invalid value for direction {self.direction}')
        ax = self.ax
        getattr(ax, type)(cut, extent=(xmin, xmax, zmin, zmax), **kwargs)

        self._object_bounds.append((xmin_, xmax_, zmin_, zmax_))
        ax.axis(self.get_object_bounds())

    def get_object_bounds(self):
        """ Return the bounds of the objects on this axes.
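
            Returns (xmin, xmax, ymin, ymax); a small default box is
            returned when nothing has been plotted on the axes yet.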
""" if len(self._object_bounds) == 0: # Nothing plotted yet return -.01, .01, -.01, .01 xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T xmax = max(xmaxs.max(), xmins.max()) xmin = min(xmins.min(), xmaxs.min()) ymax = max(ymaxs.max(), ymins.max()) ymin = min(ymins.min(), ymaxs.min()) return xmin, xmax, ymin, ymax def draw_left_right(self, size, bg_color, **kwargs): if self.direction == 'x': return ax = self.ax ax.text(.1, .95, 'L', transform=ax.transAxes, horizontalalignment='left', verticalalignment='top', size=size, bbox={'boxstyle': "square,pad=0", 'ec': bg_color, 'fc': bg_color, 'alpha': 1}, **kwargs) ax.text(.9, .95, 'R', transform=ax.transAxes, horizontalalignment='right', verticalalignment='top', size=size, bbox={'boxstyle': "square,pad=0", 'ec': bg_color, 'fc': bg_color, 'alpha': 1}, **kwargs) def draw_position(self, size, bg_color, **kwargs): ax = self.ax ax.text(0, 0, '%s=%i' % (self.direction, self.coord), transform=ax.transAxes, horizontalalignment='left', verticalalignment='bottom', size=size, bbox={'boxstyle': "square,pad=0", 'ec': bg_color, 'fc': bg_color, 'alpha': 1}, **kwargs) ################################################################################ # class BaseSlicer ################################################################################ class BaseSlicer: """ The main purpose of these class is to have auto adjust of axes size to the data with different layout of cuts. """ # This actually encodes the figsize for only one axe _default_figsize = [2.2, 2.6] def __init__(self, cut_coords, axes=None, black_bg=False): """ Create 3 linked axes for plotting orthogonal cuts. Parameters ---------- cut_coords: 3 tuple of ints The cut position, in world space. axes: matplotlib axes object, optional The axes that will be subdivided in 3. black_bg: boolean, optional If True, the background of the figure will be put to black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to pyplot's savefig. """ self._cut_coords = cut_coords if axes is None: axes = plt.axes((0., 0., 1., 1.)) axes.axis('off') self.frame_axes = axes axes.set_zorder(1) bb = axes.get_position() self.rect = (bb.x0, bb.y0, bb.x1, bb.y1) self._black_bg = black_bg self._init_axes() @staticmethod def find_cut_coords(data=None, affine=None, threshold=None, cut_coords=None): # Implement this as a staticmethod or a classmethod when # subclassing raise NotImplementedError @classmethod def init_with_figure(cls, data=None, affine=None, threshold=None, cut_coords=None, figure=None, axes=None, black_bg=False, leave_space=False): cut_coords = cls.find_cut_coords(data, affine, threshold, cut_coords) if isinstance(axes, plt.Axes) and figure is None: figure = axes.figure if not isinstance(figure, plt.Figure): # Make sure that we have a figure figsize = cls._default_figsize[:] # Adjust for the number of axes figsize[0] *= len(cut_coords) facecolor = 'k' if black_bg else 'w' if leave_space: figsize[0] += 3.4 figure = plt.figure(figure, figsize=figsize, facecolor=facecolor) else: if isinstance(axes, plt.Axes): assert axes.figure is figure, ("The axes passed are not " "in the figure") if axes is None: axes = [0., 0., 1., 1.] if leave_space: axes = [0.3, 0, .7, 1.] 
if is_iterable(axes): axes = figure.add_axes(axes) # People forget to turn their axis off, or to set the zorder, and # then they cannot see their slicer axes.axis('off') return cls(cut_coords, axes, black_bg) def title(self, text, x=0.01, y=0.99, size=15, color=None, bgcolor=None, alpha=1, **kwargs): """ Write a title to the view. Parameters ---------- text: string The text of the title x: float, optional The horizontal position of the title on the frame in fraction of the frame width. y: float, optional The vertical position of the title on the frame in fraction of the frame height. size: integer, optional The size of the title text. color: matplotlib color specifier, optional The color of the font of the title. bgcolor: matplotlib color specifier, optional The color of the background of the title. alpha: float, optional The alpha value for the background. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ if color is None: color = 'k' if self._black_bg else 'w' if bgcolor is None: bgcolor = 'w' if self._black_bg else 'k' self.frame_axes.text(x, y, text, transform=self.frame_axes.transAxes, horizontalalignment='left', verticalalignment='top', size=size, color=color, bbox={'boxstyle': "square,pad=.3", 'ec': bgcolor, 'fc': bgcolor, 'alpha': alpha}, **kwargs) def plot_map(self, map, affine, threshold=None, **kwargs): """ Plot a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. threshold : a number, None, or 'auto' If None is given, the maps are not thresholded. If a number is given, it is used to threshold the maps: values below the threshold are plotted as transparent. kwargs: Extra keyword arguments are passed to imshow. """ if threshold is not None: if threshold == 0: map = np.ma.masked_equal(map, 0, copy=False) else: map = np.ma.masked_inside(map, -threshold, threshold, copy=False) self._map_show(map, affine, type='imshow', **kwargs) def contour_map(self, map, affine, **kwargs): """ Contour a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. kwargs: Extra keyword arguments are passed to contour. 
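
            For instance (an illustrative sketch; the keyword values are
            assumptions, forwarded untouched to matplotlib's contour):
            slicer.contour_map(mask.astype(float), affine,
                               levels=[.5], colors='r')
            outlines a binary mask in red.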
""" self._map_show(map, affine, type='contour', **kwargs) def _map_show(self, map, affine, type='imshow', **kwargs): map, affine = _xyz_order(map, affine) data_bounds = get_bounds(map.shape, affine) (xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ xmin, xmax, ymin, ymax, zmin, zmax if hasattr(map, 'mask'): not_mask = np.logical_not(map.mask) xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ get_mask_bounds(not_mask, affine) if kwargs.get('vmin') is None and kwargs.get('vmax') is None: # Avoid dealing with masked arrays: they are slow if not np.any(not_mask): # Everything is masked vmin = vmax = 0 else: masked_map = np.asarray(map)[not_mask] vmin = masked_map.min() vmax = masked_map.max() if kwargs.get('vmin') is None: kwargs['vmin'] = vmin if kwargs.get('max') is None: kwargs['vmax'] = vmax else: if 'vmin' not in kwargs: kwargs['vmin'] = map.min() if 'vmax' not in kwargs: kwargs['vmax'] = map.max() bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_) # For each ax, cut the data and plot it for cut_ax in self.axes.values(): try: cut = cut_ax.do_cut(map, affine) except IndexError: # We are cutting outside the indices of the data continue cut_ax.draw_cut(cut, data_bounds, bounding_box, type=type, **kwargs) def edge_map(self, map, affine, color='r'): """ Plot the edges of a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. color: matplotlib color: string or (r, g, b) value The color used to display the edge map """ map, affine = _xyz_order(map, affine) kwargs = {'cmap': cm.alpha_cmap(color=color)} data_bounds = get_bounds(map.shape, affine) # For each ax, cut the data and plot it for cut_ax in self.axes.values(): try: cut = cut_ax.do_cut(map, affine) edge_mask = _edge_map(cut) except IndexError: # We are cutting outside the indices of the data continue cut_ax.draw_cut(edge_mask, data_bounds, data_bounds, type='imshow', **kwargs) def annotate(self, left_right=True, positions=True, size=12, **kwargs): """ Add annotations to the plot. Parameters ---------- left_right: boolean, optional If left_right is True, annotations indicating which side is left and which side is right are drawn. positions: boolean, optional If positions is True, annotations indicating the positions of the cuts are drawn. size: integer, optional The size of the text used. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ kwargs = kwargs.copy() if 'color' not in kwargs: if self._black_bg: kwargs['color'] = 'w' else: kwargs['color'] = 'k' bg_color = ('k' if self._black_bg else 'w') if left_right: for cut_ax in self.axes.values(): cut_ax.draw_left_right(size=size, bg_color=bg_color, **kwargs) if positions: for cut_ax in self.axes.values(): cut_ax.draw_position(size=size, bg_color=bg_color, **kwargs) ################################################################################ # class OrthoSlicer ################################################################################ class OrthoSlicer(BaseSlicer): """ A class to create 3 linked axes for plotting orthogonal cuts of 3D maps. Attributes ---------- axes: dictionary of axes The 3 axes used to plot each view. frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. 
""" @staticmethod def find_cut_coords(data=None, affine=None, threshold=None, cut_coords=None): if cut_coords is None: if data is None or data is False: cut_coords = (0, 0, 0) else: x_map, y_map, z_map = find_cut_coords(data, activation_threshold=threshold) cut_coords = coord_transform(x_map, y_map, z_map, affine) return cut_coords def _init_axes(self): x0, y0, x1, y1 = self.rect # Create our axes: self.axes = {} for index, direction in enumerate(('y', 'x', 'z')): ax = plt.axes([0.3*index*(x1-x0) + x0, y0, .3*(x1-x0), y1-y0]) ax.axis('off') coord = self._cut_coords['xyz'.index(direction)] cut_ax = CutAxes(ax, direction, coord) self.axes[direction] = cut_ax ax.set_axes_locator(self._locator) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = {} cut_ax_dict = self.axes x_ax = cut_ax_dict['x'] y_ax = cut_ax_dict['y'] z_ax = cut_ax_dict['z'] for cut_ax in cut_ax_dict.values(): bounds = cut_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # successful. As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[cut_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.items(): width_dict[ax] = width/total_width*(x1 -x0) left_dict = {} left_dict[y_ax.ax] = x0 left_dict[x_ax.ax] = x0 + width_dict[y_ax.ax] left_dict[z_ax.ax] = x0 + width_dict[x_ax.ax] + width_dict[y_ax.ax] return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinates are used. kwargs: Extra keyword arguments are passed to axhline """ if cut_coords is None: cut_coords = self._cut_coords x, y, z = cut_coords kwargs = kwargs.copy() if 'color' not in kwargs: if self._black_bg: kwargs['color'] = '.8' else: kwargs['color'] = 'k' ax = self.axes['y'].ax ax.axvline(x, ymin=.05, ymax=.95, **kwargs) ax.axhline(z, **kwargs) ax = self.axes['x'].ax ax.axvline(y, ymin=.05, ymax=.95, **kwargs) ax.axhline(z, xmax=.95, **kwargs) ax = self.axes['z'].ax ax.axvline(x, ymin=.05, ymax=.95, **kwargs) ax.axhline(y, **kwargs) def demo_ortho_slicer(): """ A small demo of the OrthoSlicer functionality. """ plt.clf() oslicer = OrthoSlicer(cut_coords=(0, 0, 0)) from .anat_cache import _AnatCache map, affine, _ = _AnatCache.get_anat() oslicer.plot_map(map, affine, cmap=plt.cm.gray) return oslicer ################################################################################ # class BaseStackedSlicer ################################################################################ class BaseStackedSlicer(BaseSlicer): """ A class to create linked axes for plotting stacked cuts of 3D maps. Attributes ---------- axes: dictionary of axes The axes used to plot each view. frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. 
""" @classmethod def find_cut_coords(cls, data=None, affine=None, threshold=None, cut_coords=None): if cut_coords is None: if data is None or data is False: bounds = ((-40, 40), (-30, 30), (-30, 75)) else: if hasattr(data, 'mask'): mask = np.logical_not(data.mask) else: # The mask will be anything that is fairly different # from the values in the corners edge_value = float(data[0, 0, 0] + data[0, -1, 0] + data[-1, 0, 0] + data[0, 0, -1] + data[-1, -1, 0] + data[-1, 0, -1] + data[0, -1, -1] + data[-1, -1, -1] ) edge_value /= 6 mask = np.abs(data - edge_value) > .005*data.ptp() xmin, xmax, ymin, ymax, zmin, zmax = \ get_mask_bounds(mask, affine) bounds = (xmin, xmax), (ymin, ymax), (zmin, zmax) lower, upper = bounds['xyz'.index(cls._direction)] cut_coords = np.linspace(lower, upper, 10).tolist() return cut_coords def _init_axes(self): x0, y0, x1, y1 = self.rect # Create our axes: self.axes = {} fraction = 1./len(self._cut_coords) for index, coord in enumerate(self._cut_coords): coord = float(coord) ax = plt.axes([fraction*index*(x1-x0) + x0, y0, fraction*(x1-x0), y1-y0]) ax.axis('off') cut_ax = CutAxes(ax, self._direction, coord) self.axes[coord] = cut_ax ax.set_axes_locator(self._locator) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = {} cut_ax_dict = self.axes for cut_ax in cut_ax_dict.values(): bounds = cut_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # successful. As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[cut_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.items(): width_dict[ax] = width/total_width*(x1 -x0) left_dict = {} left = float(x0) for coord, cut_ax in sorted(cut_ax_dict.items()): left_dict[cut_ax.ax] = left this_width = width_dict[cut_ax.ax] left += this_width return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinates are used. kwargs: Extra keyword arguments are passed to axhline """ return class XSlicer(BaseStackedSlicer): _direction = 'x' _default_figsize = [2.2, 2.3] class YSlicer(BaseStackedSlicer): _direction = 'y' _default_figsize = [2.6, 2.3] class ZSlicer(BaseStackedSlicer): _direction = 'z' SLICERS = {'ortho': OrthoSlicer, 'x': XSlicer, 'y': YSlicer, 'z': ZSlicer} nipy-0.6.1/nipy/labs/viz_tools/test/000077500000000000000000000000001470056100100173765ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/viz_tools/test/__init__.py000066400000000000000000000000001470056100100214750ustar00rootroot00000000000000nipy-0.6.1/nipy/labs/viz_tools/test/test_activation_maps.py000066400000000000000000000060141470056100100241710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import tempfile import numpy as np import pytest try: import matplotlib as mpl # Make really sure that we don't try to open an Xserver connection. 
mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') except ImportError: pytest.skip("Could not import matplotlib", allow_module_level=True) from unittest.mock import patch from ..activation_maps import demo_plot_map, plot_anat, plot_map from ..anat_cache import _AnatCache, mni_sform def test_demo_plot_map(): # This is only a smoke test mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') demo_plot_map() # Test the black background code path demo_plot_map(black_bg=True) def test_plot_anat(): # This is only a smoke test mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') data = np.zeros((20, 20, 20)) data[3:-3, 3:-3, 3:-3] = 1 ortho_slicer = plot_anat(data, mni_sform, dim=True) ortho_slicer = plot_anat(data, mni_sform, cut_coords=(80, -120, -60)) # Saving forces a draw, and thus smoke-tests the axes locators plt.savefig(tempfile.TemporaryFile()) ortho_slicer.edge_map(data, mni_sform, color='c') # Test saving with empty plot z_slicer = plot_anat(anat=False, slicer='z') plt.savefig(tempfile.TemporaryFile()) z_slicer = plot_anat(slicer='z') plt.savefig(tempfile.TemporaryFile()) z_slicer.edge_map(data, mni_sform, color='c') # Smoke test coordinate finder, with and without mask plot_map(np.ma.masked_equal(data, 0), mni_sform, slicer='x') plot_map(data, mni_sform, slicer='y') def test_plot_anat_kwargs(): data = np.zeros((20, 20, 20)) data[3:-3, 3:-3, 3:-3] = 1 kwargs = {'interpolation': 'nearest'} with patch('nipy.labs.viz_tools.activation_maps._plot_anat') \ as mock_plot_anat: ortho_slicer = plot_anat(data, mni_sform, dim=True, **kwargs) kwargs_passed = mock_plot_anat.call_args[-1] assert 'interpolation' in kwargs_passed assert kwargs_passed['interpolation'] == 'nearest' def test_anat_cache(): # A smoke test, that can work only if the templates are installed try: _AnatCache.get_blurred() except OSError: # The templates are not there pass def test_plot_map_empty(): # Test that things don't crash when we give a map with nothing above # threshold # This is only a smoke test mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') data = np.zeros((20, 20, 20)) plot_anat(data, mni_sform) plot_map(data, mni_sform, slicer='y', threshold=1) plt.close('all') def test_plot_map_with_auto_cut_coords(): import matplotlib.pyplot as plt plt.switch_backend('svg') data = np.zeros((20, 20, 20)) data[3:-3, 3:-3, 3:-3] = 1 for slicer in 'xyz': plot_map(data, np.eye(4), cut_coords=None, slicer=slicer, black_bg=True) nipy-0.6.1/nipy/labs/viz_tools/test/test_cm.py000066400000000000000000000020171470056100100214060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Smoke testing the cm module """ import pytest try: import matplotlib as mpl # Make really sure that we don't try to open an Xserver connection.
mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') except ImportError: pytest.skip("Could not import matplotlib", allow_module_level=True) from ..cm import dim_cmap, replace_inside def test_dim_cmap(): # This is only a smoke test mpl.use('svg') import matplotlib.pyplot as plt dim_cmap(plt.cm.jet) def test_replace_inside(): # This is only a smoke test mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') replace_inside(plt.cm.jet, plt.cm.hsv, .2, .8) # We also test with gnuplot, which is defined using functions if hasattr(plt.cm, 'gnuplot'): # gnuplot is only in recent versions of MPL replace_inside(plt.cm.gnuplot, plt.cm.gnuplot2, .2, .8) nipy-0.6.1/nipy/labs/viz_tools/test/test_coord_tools.py000066400000000000000000000036031470056100100233370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.testing import assert_array_equal from ..coord_tools import coord_transform, find_cut_coords, find_maxsep_cut_coords def test_coord_transform_trivial(): sform = np.eye(4) x = np.random.random((10,)) y = np.random.random((10,)) z = np.random.random((10,)) x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x, x_) np.testing.assert_array_equal(y, y_) np.testing.assert_array_equal(z, z_) sform[:, -1] = 1 x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x+1, x_) np.testing.assert_array_equal(y+1, y_) np.testing.assert_array_equal(z+1, z_) def test_find_cut_coords(): map = np.zeros((100, 100, 100)) x_map, y_map, z_map = 50, 10, 40 map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 x, y, z = find_cut_coords(map, mask=np.ones(map.shape, np.bool_)) np.testing.assert_array_equal( (int(round(x)), int(round(y)), int(round(z))), (x_map, y_map, z_map)) def test_find_maxsep_cut_coords(): # Test find_maxsep_cut_coords function assert_array_equal( find_maxsep_cut_coords(np.ones((2, 3, 5)), np.eye(4)), list(range(5))) assert_array_equal( find_maxsep_cut_coords(np.ones((2, 3, 5)), np.eye(4), threshold=1), list(range(5))) assert_array_equal( find_maxsep_cut_coords(np.ones((2, 3, 4)), np.eye(4), n_cuts=4), list(range(4))) map_3d = np.ones((2, 3, 5)) map_3d[:, :, 1] = 0 assert_array_equal( find_maxsep_cut_coords(map_3d, np.eye(4), n_cuts=4), [0, 2, 3, 4]) map_3d[:, :, 1] = 0.5 assert_array_equal( find_maxsep_cut_coords(map_3d, np.eye(4), n_cuts=4, threshold=0.6), [0, 2, 3, 4]) nipy-0.6.1/nipy/labs/viz_tools/test/test_edge_detect.py000066400000000000000000000010661470056100100232460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ..edge_detect import _edge_detect, _fast_abs_percentile ################################################################################ def test_fast_abs_percentile(): data = np.arange(1, 100) for p in range(10, 100, 10): assert _fast_abs_percentile(data, p-1) == p def test_edge_detect(): img = np.zeros((10, 10)) img[:5] = 1 # The edge between the two flat halves runs along row 4/5 _, edge_mask = _edge_detect(img) assert np.all(edge_mask[4]) nipy-0.6.1/nipy/labs/viz_tools/test/test_slicers.py000066400000000000000000000015311470056100100224530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import pytest try: import matplotlib as mpl except ImportError: pytest.skip("Could not import matplotlib", allow_module_level=True) from
..anat_cache import find_mni_template from ..slicers import demo_ortho_slicer ################################################################################ # Some smoke testing for graphics-related code def test_demo_ortho_slicer(): # This is only a smoke test # conditioned on presence of MNI template if not find_mni_template(): pytest.skip("MNI Template is absent for the smoke test") # Make really sure that we don't try to open an Xserver connection. mpl.use('svg') import matplotlib.pyplot as plt plt.switch_backend('svg') demo_ortho_slicer() nipy-0.6.1/nipy/meson.build000066400000000000000000000014001470056100100156030ustar00rootroot00000000000000python_sources = [ '__init__.py', 'info.py', 'pkg_info.py', 'conftest.py', ] py.install_sources( python_sources, pure: false, subdir: 'nipy' ) cython_cli = find_program('_build_utils/cythoner.py') cython_gen = generator(cython_cli, arguments : ['@INPUT@', '@OUTPUT@'], output : '@BASENAME@.c') # Suppress warning for deprecated Numpy API. # (Suppress warning messages emitted by #warning directives). cython_c_args += [use_math_defines, numpy_nodepr_api] pure_subdirs = [ 'cli', 'core', 'interfaces', 'io', 'modalities', 'testing', 'tests', 'utils' ] install_root = py.get_install_dir() foreach subdir: pure_subdirs install_subdir(subdir, install_dir: install_root / 'nipy') endforeach subdir('algorithms') subdir('labs') nipy-0.6.1/nipy/modalities/000077500000000000000000000000001470056100100156005ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/__init__.py000066400000000000000000000003131470056100100177060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package containing modality-specific classes. """ __docformat__ = 'restructuredtext' nipy-0.6.1/nipy/modalities/fmri/000077500000000000000000000000001470056100100165355ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/fmri/__init__.py000066400000000000000000000002741470056100100206510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ TODO """ __docformat__ = 'restructuredtext' from . import fmristat nipy-0.6.1/nipy/modalities/fmri/api.py000066400000000000000000000002431470056100100176570ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .fmri import FmriImageList, axis0_generator nipy-0.6.1/nipy/modalities/fmri/design.py000066400000000000000000000461411470056100100203660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Convenience functions for specifying a design in the GLM """ import itertools from functools import reduce from operator import mul import numpy as np from nipy.algorithms.statistics.formula import formulae from nipy.algorithms.statistics.formula.formulae import ( Factor, Formula, Term, make_recarray, ) from nipy.algorithms.statistics.utils import combinations from .hrf import glover from .utils import T, blocks, convolve_functions, events from .utils import fourier_basis as fourier_basis_sym def fourier_basis(t, freq): """ Create a design matrix with columns given by the Fourier basis with a given set of frequencies. Parameters ---------- t : np.ndarray An array of np.float64 values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image.
freq : sequence of float Frequencies for the terms in the Fourier basis. Returns ------- X : np.ndarray Examples -------- >>> t = np.linspace(0,50,101) >>> drift = fourier_basis(t, np.array([4,6,8])) >>> drift.shape (101, 6) """ tval = make_recarray(t, ['t']) f = fourier_basis_sym(freq) return f.design(tval, return_float=True) def natural_spline(tvals, knots=None, order=3, intercept=True): """ Design matrix with columns given by a natural spline of order `order` Return design matrix with natural splines with knots `knots`, order `order`. If `intercept` == True (the default), add constant column. Parameters ---------- tvals : np.array Time values knots : None or sequence, optional Sequence of float. Default None (same as empty list) order : int, optional Order of the spline. Defaults to a cubic (==3) intercept : bool, optional If True, include a constant function in the natural spline. Default is True Returns ------- X : np.ndarray Examples -------- >>> tvals = np.linspace(0,50,101) >>> drift = natural_spline(tvals, knots=[10,20,30,40]) >>> drift.shape (101, 8) """ tval = make_recarray(tvals, ['t']) t = Term('t') f = formulae.natural_spline(t, knots=knots, order=order, intercept=intercept) return f.design(tval, return_float=True) def _build_formula_contrasts(spec, fields, order): """ Build formula and contrasts in event / block space Parameters ---------- spec : structured array Structured array containing at least fields listed in `fields`. fields : sequence of str Sequence of field names containing names of factors. order : int Maximum order of interactions between main effects. Returns ------- e_factors : :class:`Formula` instance Formula for factors given by `fields` e_contrasts : dict Dictionary containing contrasts of main effects and interactions between factors. """ if len(fields) == 0: raise ValueError('Specify at least one field') e_factors = [Factor(n, np.unique(spec[n])) for n in fields] e_formula = reduce(mul, e_factors) e_contrasts = {} # Add contrasts for factors and factor interactions max_order = min(len(e_factors), order) for i in range(1, max_order + 1): for comb in combinations(zip(fields, e_factors), i): names = [c[0] for c in comb] # Collect factors where there is more than one level fs = [fc.main_effect for fn, fc in comb if len(fc.levels) > 1] if len(fs) > 0: e_contrast = reduce(mul, fs).design(spec) e_contrasts[":".join(names)] = e_contrast e_contrasts['constant'] = formulae.I.design(spec) return e_formula, e_contrasts def event_design(event_spec, t, order=2, hrfs=(glover,), level_contrasts=False): """ Create design matrix at times `t` for event specification `event_spec` Create a design matrix for a linear model based on an event specification `event_spec`, evaluating the design rows at a sequence of time values `t`. Each column in the design matrix will be convolved with each HRF in `hrfs`. Parameters ---------- event_spec : np.recarray A recarray having at least a field named 'time' signifying the event time; all other fields will be treated as factors in an ANOVA-type model. If there is no field other than time, add a single-level placeholder event type ``_event_``. t : np.ndarray An array of np.float64 values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image. order : int, optional The highest order interaction to be considered in constructing the contrast matrices. hrfs : sequence, optional A sequence of (symbolic) HRFs that will be convolved with each event. Default is ``(glover,)``.
level_contrasts : bool, optional If True, generate contrasts for each individual level of each factor. Returns ------- X : np.ndarray The design matrix with ``X.shape[0] == t.shape[0]``. The number of columns will depend on the other fields of `event_spec`. contrasts : dict Dictionary of contrasts that are expected to be of interest from the event specification. Each interaction / effect up to a given order will be returned. Also, a contrast is generated for each interaction / effect for each HRF specified in `hrfs`. """ fields = list(event_spec.dtype.names) if 'time' not in fields: raise ValueError('expecting a field called "time"') fields.pop(fields.index('time')) if len(fields) == 0: # No factors specified, make generic event event_spec = make_recarray(zip(event_spec['time'], itertools.cycle([1])), ('time', '_event_')) fields = ['_event_'] e_formula, e_contrasts = _build_formula_contrasts( event_spec, fields, order) # Design and contrasts in event space # TODO: make it so I don't have to call design twice here # to get both the contrasts and the e_X matrix as a recarray e_X = e_formula.design(event_spec) e_dtype = e_formula.dtype # Now construct the design in time space t_terms = [] t_contrasts = {} for l, h in enumerate(hrfs): for n in e_dtype.names: term = events(event_spec['time'], amplitudes=e_X[n], f=h) t_terms += [term] if level_contrasts: t_contrasts['%s_%d' % (n, l)] = Formula([term]) for n, c in e_contrasts.items(): t_contrasts["%s_%d" % (n, l)] = Formula([ \ events(event_spec['time'], amplitudes=c[nn], f=h) for i, nn in enumerate(c.dtype.names)]) t_formula = Formula(t_terms) tval = make_recarray(t, ['t']) X_t, c_t = t_formula.design(tval, contrasts=t_contrasts) return X_t, c_t def block_design(block_spec, t, order=2, hrfs=(glover,), convolution_padding=5., convolution_dt=0.02, hrf_interval=(0.,30.), level_contrasts=False): """ Create design matrix at times `t` for blocks specification `block_spec` Create a design matrix for a linear model from a block specification `block_spec`, evaluating design rows at a sequence of time values `t`. Each column in the design matrix will be convolved with each HRF in `hrfs`. Parameters ---------- block_spec : np.recarray A recarray having at least a field named 'start' and a field named 'end' signifying the block onset and offset times. All other fields will be treated as factors in an ANOVA-type model. If there is no field other than 'start' and 'end', add a single-level placeholder block type ``_block_``. t : np.ndarray An array of np.float64 values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image. order : int, optional The highest order interaction to be considered in constructing the contrast matrices. hrfs : sequence, optional A sequence of (symbolic) HRFs that will be convolved with each block. Default is ``(glover,)``. convolution_padding : float, optional A padding for the convolution with the HRF. The intervals used for the convolution are the smallest 'start' minus this padding to the largest 'end' plus this padding. convolution_dt : float, optional Time step for high-resolution time course for use in convolving the blocks with each HRF. hrf_interval: length 2 sequence of floats, optional Interval over which the HRF is assumed supported, used in the convolution. level_contrasts : bool, optional If True, generate contrasts for each individual level of each factor. Returns ------- X : np.ndarray The design matrix with ``X.shape[0] == t.shape[0]``.
The number of columns will depend on the other fields of `block_spec`. contrasts : dict Dictionary of contrasts that are expected to be of interest from the block specification. Each interaction / effect up to a given order will be returned. Also, a contrast is generated for each interaction / effect for each HRF specified in `hrfs`. """ fields = list(block_spec.dtype.names) if 'start' not in fields or 'end' not in fields: raise ValueError('expecting fields called "start" and "end"') fields.pop(fields.index('start')) fields.pop(fields.index('end')) if len(fields) == 0: # No factors specified, make generic block block_spec = make_recarray(zip(block_spec['start'], block_spec['end'], itertools.cycle([1])), ('start', 'end', '_block_')) fields = ['_block_'] e_formula, e_contrasts = _build_formula_contrasts( block_spec, fields, order) # Design and contrasts in block space # TODO: make it so I don't have to call design twice here # to get both the contrasts and the e_X matrix as a recarray e_X = e_formula.design(block_spec) e_dtype = e_formula.dtype # Now construct the design in time space block_times = np.array(list(zip(block_spec['start'], block_spec['end']))) convolution_interval = (block_times.min() - convolution_padding, block_times.max() + convolution_padding) t_terms = [] t_contrasts = {} for l, h in enumerate(hrfs): for n in e_dtype.names: B = blocks(block_times, amplitudes=e_X[n]) term = convolve_functions(B, h(T), convolution_interval, hrf_interval, convolution_dt) t_terms += [term] if level_contrasts: t_contrasts['%s_%d' % (n, l)] = Formula([term]) for n, c in e_contrasts.items(): F = [] for i, nn in enumerate(c.dtype.names): B = blocks(block_times, amplitudes=c[nn]) F.append(convolve_functions(B, h(T), convolution_interval, hrf_interval, convolution_dt)) t_contrasts["%s_%d" % (n, l)] = Formula(F) t_formula = Formula(t_terms) tval = make_recarray(t, ['t']) X_t, c_t = t_formula.design(tval, contrasts=t_contrasts) return X_t, c_t def stack2designs(old_X, new_X, old_contrasts={}, new_contrasts={}): """ Add some columns to a design matrix that has contrast matrices already specified, adding some possibly new contrasts as well. This basically performs an np.hstack of old_X, new_X and makes sure the contrast matrices are dealt with accordingly. If two contrasts have the same name, an exception is raised. Parameters ---------- old_X : np.ndarray A design matrix new_X : np.ndarray A second design matrix to be stacked with old_X old_contrasts : dict Dictionary of contrasts in the old_X column space new_contrasts : dict Dictionary of contrasts in the new_X column space Returns ------- X : np.ndarray A new design matrix: np.hstack([old_X, new_X]) contrasts : dict The new contrast matrices reflecting changes to the columns.
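Examples -------- A minimal, self-contained sketch stacking two one-column designs (the values here are purely illustrative): >>> X1 = np.ones((5, 1)) >>> X2 = np.arange(5).reshape(5, 1) >>> X, cons = stack2designs(X1, X2, {'mean': np.array([1])}, {'slope': np.array([1])}) >>> X.shape (5, 2) >>> cons['mean'].tolist() [1.0, 0.0] >>> cons['slope'].tolist() [0.0, 1.0]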
""" contrasts = {} old_X = np.asarray(old_X) new_X = np.asarray(new_X) if old_X.size == 0: return new_X, new_contrasts if new_X.size == 0: return old_X, old_contrasts if old_X.ndim == 1: old_X = old_X[:, None] if new_X.ndim == 1: new_X = new_X[:, None] X = np.hstack([old_X, new_X]) if set(old_contrasts.keys()).intersection(new_contrasts.keys()) != set(): raise ValueError('old and new contrasts must have different names') for n, c in old_contrasts.items(): if c.ndim > 1: cm = np.zeros((c.shape[0], X.shape[1])) cm[:,:old_X.shape[1]] = c else: cm = np.zeros(X.shape[1]) cm[:old_X.shape[1]] = c contrasts[n] = cm for n, c in new_contrasts.items(): if c.ndim > 1: cm = np.zeros((c.shape[0], X.shape[1])) cm[:,old_X.shape[1]:] = c else: cm = np.zeros(X.shape[1]) cm[old_X.shape[1]:] = c contrasts[n] = cm return X, contrasts def stack_contrasts(contrasts, name, keys): """ Create a new F-contrast matrix called 'name' based on a sequence of keys. The contrast is added to contrasts, in-place. Parameters ---------- contrasts : dict Dictionary of contrast matrices name : str Name of new contrast. Should not already be a key of contrasts. keys : sequence of str Keys of contrasts that are to be stacked. Returns ------- None """ if name in contrasts: raise ValueError(f'contrast "{name}" already exists') contrasts[name] = np.vstack([contrasts[k] for k in keys]) def stack_designs(*pairs): r""" Stack a sequence of design / contrast dictionary pairs Uses multiple calls to :func:`stack2designs` Parameters ---------- \*pairs : sequence Elements of either (np.ndarray, dict) or (np.ndarray,) or np.ndarray Returns ------- X : np.ndarray new design matrix: np.hstack([old_X, new_X]) contrasts : dict The new contrast matrices reflecting changes to the columns. """ X = [] contrasts = {} for p in pairs: if isinstance(p, np.ndarray): new_X = p; new_con = {} elif len(p) == 1: # Length one sequence new_X = p[0]; new_con = {} else: # Length 2 sequence new_X, new_con = p X, contrasts = stack2designs(X, new_X, contrasts, new_con) return X, contrasts def openfmri2nipy(ons_dur_amp): """ Contents of OpenFMRI condition file `ons_dur_map` as nipy recarray Parameters ---------- ons_dur_amp : str or array Path to OpenFMRI stimulus file or 2D array containing three columns corresponding to onset, duration, amplitude. Returns ------- block_spec : array Structured array with fields "start" (corresponding to onset time), "end" (onset time plus duration), "amplitude". """ if not isinstance(ons_dur_amp, np.ndarray): ons_dur_amp = np.loadtxt(ons_dur_amp) onsets, durations, amplitudes = ons_dur_amp.T return make_recarray( np.column_stack((onsets, onsets + durations, amplitudes)), names=['start', 'end', 'amplitude'], drop_name_dim=True) def block_amplitudes(name, block_spec, t, hrfs=(glover,), convolution_padding=5., convolution_dt=0.02, hrf_interval=(0.,30.)): """ Design matrix at times `t` for blocks specification `block_spec` Create design matrix for linear model from a block specification `block_spec`, evaluating design rows at a sequence of time values `t`. `block_spec` may specify amplitude of response for each event, if different (see description of `block_spec` parameter below). The on-off step function implied by `block_spec` will be convolved with each HRF in `hrfs` to form a design matrix shape ``(len(t), len(hrfs))``. 
Parameters ---------- name : str Name of condition block_spec : np.recarray or array-like A recarray having fields ``start, end, amplitude``, or a 2D ndarray / array-like with three columns corresponding to start, end, amplitude. t : np.ndarray An array of np.float64 values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image. hrfs : sequence, optional A sequence of (symbolic) HRFs that will be convolved with each block. Default is ``(glover,)``. convolution_padding : float, optional A padding for the convolution with the HRF. The intervals used for the convolution are the smallest 'start' minus this padding to the largest 'end' plus this padding. convolution_dt : float, optional Time step for high-resolution time course for use in convolving the blocks with each HRF. hrf_interval: length 2 sequence of floats, optional Interval over which the HRF is assumed supported, used in the convolution. Returns ------- X : np.ndarray The design matrix with ``X.shape[0] == t.shape[0]``. The number of columns will be ``len(hrfs)``. contrasts : dict A contrast is generated for each HRF specified in `hrfs`. """ block_spec = np.asarray(block_spec) if block_spec.dtype.names is not None: if block_spec.dtype.names not in (('start', 'end'), ('start', 'end', 'amplitude')): raise ValueError('expecting fields called "start", "end" and ' '(optionally) "amplitude"') block_spec = np.array(block_spec.tolist()) block_times = block_spec[:, :2] amplitudes = block_spec[:, 2] if block_spec.shape[1] == 3 else None # Now construct the design in time space convolution_interval = (block_times.min() - convolution_padding, block_times.max() + convolution_padding) B = blocks(block_times, amplitudes=amplitudes) t_terms = [] c_t = {} n_hrfs = len(hrfs) for hrf_no in range(n_hrfs): t_terms.append(convolve_functions(B, hrfs[hrf_no](T), convolution_interval, hrf_interval, convolution_dt)) contrast = np.zeros(n_hrfs) contrast[hrf_no] = 1 c_t[f'{name}_{hrf_no:d}'] = contrast t_formula = Formula(t_terms) tval = make_recarray(t, ['t']) X_t = t_formula.design(tval, return_float=True) return X_t, c_t nipy-0.6.1/nipy/modalities/fmri/design_matrix.py000066400000000000000000000412441470056100100217510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements fMRI Design Matrix creation. The DesignMatrix object is just a container that represents the design matrix. Computations of the different parts of the design matrix are confined to the make_dmtx() function, that instantiates the DesignMatrix object. All the remainder are just ancillary functions. Design matrices contain three different types of regressors: 1. Task-related regressors, that result from the convolution of the experimental paradigm regressors with hemodynamic models 2. User-specified regressors, that represent information available on the data, e.g. motion parameters, physiological data resampled at the acquisition rate, or sinusoidal regressors that model the signal at a frequency of interest. 3. Drift regressors, that represent low_frequency phenomena of no interest in the data; they need to be included to reduce variance estimates. 
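As a minimal sketch (with purely illustrative values), a drift-only design can be built directly from the frame times: >>> import numpy as np >>> frametimes = np.arange(0, 128, 2.5) >>> dmtx = make_dmtx(frametimes, drift_model='polynomial', drift_order=3) >>> dmtx.matrix.shape (52, 4)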
Author: Bertrand Thirion, 2009-2011 """ from warnings import warn import numpy as np from .hemodynamic_models import _orthogonalize, compute_regressor ###################################################################### # Ancillary functions ###################################################################### def _poly_drift(order, frametimes): """Create a polynomial drift matrix Parameters ---------- order, int, number of polynomials in the drift model frametimes, array of shape(n_scans) the sampling times; the maximal time value is used to properly normalize the columns Returns ------- pol, array of shape(n_scans, order + 1) all the polynomial drifts plus a constant regressor """ order = int(order) pol = np.zeros((np.size(frametimes), order + 1)) tmax = float(frametimes.max()) for k in range(order + 1): pol[:, k] = (frametimes / tmax) ** k pol = _orthogonalize(pol) pol = np.hstack((pol[:, 1:], pol[:, :1])) return pol def _cosine_drift(period_cut, frametimes): """Create a cosine drift matrix with periods greater than or equal to period_cut Parameters ---------- period_cut: float Cut period of the low-pass filter (in sec) frametimes: array of shape(nscans) The sampling times (in sec) Returns ------- cdrift: array of shape(n_scans, n_drifts) cosine drifts plus a constant regressor in the last column Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II """ len_tim = len(frametimes) n_times = np.arange(len_tim) hfcut = 1./ period_cut # input parameter is the period dt = frametimes[1] - frametimes[0] # frametimes.max() should be (len_tim-1)*dt order = int(np.floor(2*len_tim*hfcut*dt)) # s.t. hfcut = 1/(2*dt) yields len_tim cdrift = np.zeros((len_tim, order)) nfct = np.sqrt(2.0/len_tim) for k in range(1, order): cdrift[:,k-1] = nfct * np.cos((np.pi/len_tim)*(n_times + .5)*k) cdrift[:,order-1] = 1.
# or 1./sqrt(len_tim) to normalize return cdrift def _blank_drift(frametimes): """ Create the blank drift matrix Returns ------- np.ones_like(frametimes) """ return np.reshape(np.ones_like(frametimes), (np.size(frametimes), 1)) def _make_drift(drift_model, frametimes, order=1, hfcut=128.): """Create the drift matrix Parameters ---------- drift_model: string, to be chosen among 'polynomial', 'cosine', 'blank' that specifies the desired drift model frametimes: array of shape(n_scans), list of values representing the desired TRs order: int, optional, order of the drift model (in case it is polynomial) hfcut: float, optional, frequency cut in case of a cosine model Returns ------- drift: array of shape(n_scans, n_drifts), the drift matrix names: list of length(ndrifts), the associated names """ drift_model = drift_model.lower() # for robust comparisons if drift_model == 'polynomial': drift = _poly_drift(order, frametimes) elif drift_model == 'cosine': drift = _cosine_drift(hfcut, frametimes) elif drift_model == 'blank': drift = _blank_drift(frametimes) else: raise NotImplementedError(f"Unknown drift model {drift_model!r}") names = [f'drift_{k}' for k in range(1, drift.shape[1])] names.append('constant') return drift, names def _convolve_regressors(paradigm, hrf_model, frametimes, fir_delays=[0], min_onset=-24): """ Creation of a matrix that comprises the convolution of the conditions onset with a certain hrf model Parameters ---------- paradigm: paradigm instance hrf_model: string that can be 'canonical', 'canonical with derivative' or 'fir' that specifies the hemodynamic response function frametimes: array of shape(n_scans) the targeted timing for the design matrix fir_delays=[0], optional, array of shape(nb_onsets) or list in case of FIR design, yields the array of delays used in the FIR model min_onset: float, optional minimal onset relative to frametimes[0] (in seconds) events that start before frametimes[0] + min_onset are not considered Returns ------- rmatrix: array of shape(n_scans, n_regressors), contains the convolved regressors associated with the experimental condition names: list of strings, the condition names, that depend on the hrf model used if 'canonical' then this is identical to the input names if 'canonical with derivative', then two names are produced for input name 'name': 'name' and 'name_derivative' """ hnames = [] rmatrix = None if hrf_model == 'fir': oversampling = 1 else: oversampling = 16 for nc in np.unique(paradigm.con_id): onsets = paradigm.onset[paradigm.con_id == nc] nos = np.size(onsets) if paradigm.amplitude is not None: values = paradigm.amplitude[paradigm.con_id == nc] else: values = np.ones(nos) if nos < 1: continue if paradigm.type == 'event': duration = np.zeros_like(onsets) else: duration = paradigm.duration[paradigm.con_id == nc] exp_condition = (onsets, duration, values) reg, names = compute_regressor( exp_condition, hrf_model, frametimes, con_id=nc, fir_delays=fir_delays, oversampling=oversampling, min_onset=min_onset) hnames += names if rmatrix is None: rmatrix = reg else: rmatrix = np.hstack((rmatrix, reg)) return rmatrix, hnames def _full_rank(X, cmax=1e15): """ This function possibly adds a scalar matrix to X to guarantee that the condition number is smaller than a given threshold. 
Parameters ---------- X: array of shape(nrows, ncols) cmax: float, optional (default 1.e15) upper bound allowed for the condition number Returns ------- X: array of shape(nrows, ncols) after regularization c: float the condition number of the returned matrix """ U, s, V = np.linalg.svd(X, 0) smax, smin = s.max(), s.min() c = smax / smin if c < cmax: return X, c warn('Matrix is singular at working precision, regularizing...') lda = (smax - cmax * smin) / (cmax - 1) s = s + lda X = np.dot(U, np.dot(np.diag(s), V)) return X, cmax ###################################################################### # Design matrix ###################################################################### class DesignMatrix: """ A light-weight container class for design matrices This class is only used for IO and visualization. Attributes ---------- matrix: array of shape (n_scans, n_regressors) the numerical specification of the matrix. names: list of len (n_regressors) the names associated with the columns. frametimes: array of shape (n_scans), optional the occurrence time of the matrix rows. """ def __init__(self, matrix, names, frametimes=None): """ """ matrix_ = np.atleast_2d(matrix) if matrix_.shape[1] != len(names): raise ValueError( 'The number of names should equal the number of columns') if frametimes is not None: if frametimes.size != matrix.shape[0]: raise ValueError( 'The number %d of frametimes is different from the ' 'number %d of rows' % (frametimes.size, matrix.shape[0])) frametimes = np.asarray(frametimes, dtype=np.float64) self.frametimes = frametimes self.matrix = matrix_ self.names = names def write_csv(self, path): """ Write self.matrix as a csv file with appropriate column names Parameters ---------- path: string, path of the resulting csv file Notes ----- The frametimes are not written """ import csv with open(path, "w", newline='') as fid: writer = csv.writer(fid) writer.writerow(self.names) writer.writerows(self.matrix) def show(self, rescale=True, ax=None, cmap=None): """Visualization of a design matrix Parameters ---------- rescale: bool, optional rescale columns magnitude for visualization or not. ax: axis handle, optional Handle to axis onto which we will draw design matrix. cmap: colormap, optional Matplotlib colormap to use, passed to `imshow`. Returns ------- ax: axis handle """ import matplotlib.pyplot as plt # normalize the values per column for better visualization x = self.matrix.copy() if rescale: x = x / np.sqrt(np.sum(x ** 2, 0)) if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) ax.imshow(x, interpolation='nearest', aspect='auto', cmap=cmap) ax.set_label('conditions') ax.set_ylabel('scan number') if self.names is not None: ax.set_xticks(list(range(len(self.names)))) ax.set_xticklabels(self.names, rotation=60, ha='right') return ax def show_contrast(self, contrast, ax=None, cmap=None): """ Plot a contrast for a design matrix. Parameters ---------- contrast : np.float64 Array forming contrast with respect to the design matrix. ax: axis handle, optional Handle to axis onto which we will draw design matrix. cmap: colormap, optional Matplotlib colormap to use, passed to `imshow`.
Returns ------- ax: axis handle """ import matplotlib.pyplot as plt contrast = np.atleast_2d(contrast) if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) ax.imshow(contrast, interpolation='nearest', aspect='auto', cmap=cmap) ax.set_label('conditions') ax.set_yticks(range(contrast.shape[0])) ax.set_yticklabels([]) if self.names is not None: ax.set_xticks(range(len(self.names))) ax.set_xticklabels(self.names, rotation=60, ha='right') return ax def make_dmtx(frametimes, paradigm=None, hrf_model='canonical', drift_model='cosine', hfcut=128, drift_order=1, fir_delays=[0], add_regs=None, add_reg_names=None, min_onset=-24): """ Generate a design matrix from the input parameters Parameters ---------- frametimes: array of shape(nbframes), the timing of the scans paradigm: Paradigm instance, optional description of the experimental paradigm hrf_model: string, optional, that specifies the hemodynamic response function. Can be one of {'canonical', 'canonical with derivative', 'fir', 'spm', 'spm_time', 'spm_time_dispersion'}. drift_model: string, optional specifies the desired drift model, to be chosen among 'polynomial', 'cosine', 'blank' hfcut: float, optional cut period of the low-pass filter drift_order: int, optional order of the drift model (in case it is polynomial) fir_delays: array of shape(nb_onsets) or list, optional, in case of FIR design, yields the array of delays used in the FIR model add_regs: array of shape(nbframes, naddreg), optional additional user-supplied regressors add_reg_names: list of (naddreg) regressor names, optional if None, while naddreg>0, these will be termed 'reg_%i',i=0..naddreg-1 min_onset: float, optional minimal onset relative to frametimes[0] (in seconds) events that start before frametimes[0] + min_onset are not considered Returns ------- DesignMatrix instance """ # check arguments frametimes = np.asarray(frametimes, dtype=np.float64) # check that additional regressor specification is correct n_add_regs = 0 if add_regs is not None: if add_regs.shape[0] == np.size(add_regs): add_regs = np.reshape(add_regs, (np.size(add_regs), 1)) n_add_regs = add_regs.shape[1] if add_regs.shape[0] != np.size(frametimes): raise ValueError( 'incorrect specification of additional regressors: ' f'length of regressors provided: {add_regs.shape[0]}, number of ' f'time-frames: {np.size(frametimes)}') # check that additional regressor names are well specified if add_reg_names is None: add_reg_names = ['reg%d' % k for k in range(n_add_regs)] elif len(add_reg_names) != n_add_regs: raise ValueError( 'Incorrect number of additional regressor names was provided ' '(%d provided, %d expected)' % (len(add_reg_names), n_add_regs)) # computation of the matrix names = [] matrix = np.zeros((frametimes.size, 0)) # step 1: paradigm-related regressors if paradigm is not None: # create the condition-related regressors matrix, names = _convolve_regressors( paradigm, hrf_model.lower(), frametimes, fir_delays, min_onset) # step 2: additional regressors if add_regs is not None: # add user-supplied regressors and corresponding names matrix = np.hstack((matrix, add_regs)) names += add_reg_names # step 3: drifts drift, dnames = _make_drift(drift_model.lower(), frametimes, drift_order, hfcut) matrix = np.hstack((matrix, drift)) names += dnames # complete the names with the drift terms # step 4: Force the design matrix to be full rank at working precision matrix, _ = _full_rank(matrix) return DesignMatrix(matrix, names, frametimes) def
dmtx_from_csv(path, frametimes=None): """ Return a DesignMatrix instance from a csv file Parameters ---------- path: string, path of the .csv file Returns ------- A DesignMatrix instance """ import csv with open(path, newline='') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) csvfile.seek(0) reader = csv.reader(csvfile, dialect) boolfirst = True design = [] for row in reader: if boolfirst: names = [row[j] for j in range(len(row))] boolfirst = False else: design.append([row[j] for j in range(len(row))]) x = np.array([[float(t) for t in xr] for xr in design]) return DesignMatrix(x, names, frametimes) def dmtx_light(frametimes, paradigm=None, hrf_model='canonical', drift_model='cosine', hfcut=128, drift_order=1, fir_delays=[0], add_regs=None, add_reg_names=None, min_onset=-24, path=None): """Make a design matrix, returning it as plain arrays rather than wrapped in a DesignMatrix instance Parameters ---------- see make_dmtx, plus path: string, optional: a path to write the output Returns ------- dmtx array of shape(nbframes, nreg): the sampled design matrix names list of strings of len (nreg) the names of the columns of the design matrix """ dmtx_ = make_dmtx(frametimes, paradigm, hrf_model, drift_model, hfcut, drift_order, fir_delays, add_regs, add_reg_names, min_onset) if path is not None: dmtx_.write_csv(path) return dmtx_.matrix, dmtx_.names nipy-0.6.1/nipy/modalities/fmri/experimental_paradigm.py000066400000000000000000000175361470056100100234600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements an object to deal with experimental paradigms. In fMRI data analysis, there are two main types of experimental paradigms: block and event-related paradigms. They correspond to 2 classes EventRelatedParadigm and BlockParadigm. Both are implemented here, together with functions to write paradigms to csv files. Notes ----- Although Paradigm objects have no notion of session or acquisitions (they are assumed to correspond to a sequential acquisition, called 'session' in SPM jargon), the .csv file used to represent a paradigm may be multi-session, so it is assumed that the first column of a file yielding a paradigm is in fact a session index Author: Bertrand Thirion, 2009-2011 """ import numpy as np ########################################################## # Paradigm handling ########################################################## class Paradigm: """ Simple class to handle the experimental paradigm in one session """ def __init__(self, con_id=None, onset=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional identifier of the events onset: array of shape (n_events), type = float, optional, onset time (in s.)
of the events amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ self.con_id = con_id self.onset = onset self.amplitude = amplitude if con_id is not None: self.n_events = len(con_id) self.con_id = np.ravel(np.array(con_id)).astype('str') if onset is not None: if len(onset) != self.n_events: raise ValueError( 'inconsistent definition of ids and onsets') self.onset = np.ravel(np.array(onset)).astype(np.float64) if amplitude is not None: if len(amplitude) != self.n_events: raise ValueError('inconsistent definition of amplitude') self.amplitude = np.ravel(np.array(amplitude)) self.type = 'event' self.n_conditions = len(np.unique(self.con_id)) def write_to_csv(self, csv_file, session='0'): """ Write the paradigm to a csv file Parameters ---------- csv_file: string, path of the csv file session: string, optional, session identifier """ import csv with open(csv_file, "w", newline='') as fid: writer = csv.writer(fid, delimiter=' ') n_pres = np.size(self.con_id) sess = np.repeat(session, n_pres) pdata = np.vstack((sess, self.con_id, self.onset)).T # add the duration information if self.type == 'event': duration = np.zeros(np.size(self.con_id)) else: duration = self.duration pdata = np.hstack((pdata, np.reshape(duration, (n_pres, 1)))) # add the amplitude information if self.amplitude is not None: amplitude = np.reshape(self.amplitude, (n_pres, 1)) pdata = np.hstack((pdata, amplitude)) # write pdata for row in pdata: writer.writerow(row) class EventRelatedParadigm(Paradigm): """ Class to handle event-related paradigms """ def __init__(self, con_id=None, onset=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional id of the events (name of the experimental condition) onset: array of shape (n_events), type = float, optional onset time (in s.) of the events amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ Paradigm.__init__(self, con_id, onset, amplitude) class BlockParadigm(Paradigm): """ Class to handle block paradigms """ def __init__(self, con_id=None, onset=None, duration=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional id of the events (name of the experimental condition) onset: array of shape (n_events), type = float, optional onset time (in s.) of the events duration: array of shape (n_events), type = float, optional duration (in s.) of the blocks amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ Paradigm.__init__(self, con_id, onset, amplitude) self.duration = duration self.type = 'block' if duration is not None: if len(duration) != self.n_events: raise ValueError('inconsistent definition of duration') self.duration = np.ravel(np.array(duration)) def load_paradigm_from_csv_file(path, session=None): """ Read a (.csv) paradigm file consisting of values yielding (occurrence time, (duration), event ID, modulation) and return a paradigm instance or a dictionary of paradigm instances Parameters ---------- path: string, path to a .csv file that describes the paradigm session: string, optional, session identifier by default the output is a dictionary of session-level paradigm instances indexed by session Returns ------- paradigm, paradigm instance (if session is provided), or dictionary of paradigm instances otherwise, the resulting session-by-session paradigm Notes ----- It is assumed that the csv file contains the following columns: (session id, condition id, onset), plus possibly (duration) and/or (amplitude).
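For example (values purely illustrative), a two-session event-related file could contain lines such as ``0 c1 10.0``, ``0 c2 13.5`` and ``1 c1 9.8``, where the three columns give the session index, the condition id and the onset time.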
If all the durations are 0, the paradigm will be handled as event-related. FIXME: would be much clearer if amplitude was put before duration in the .csv """ import csv with open(path, newline='') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) csvfile.seek(0) reader = csv.reader(csvfile, dialect) # load the csv as a paradigm array sess, cid, onset, amplitude, duration = [], [], [], [], [] for row in reader: sess.append(row[0]) cid.append(row[1]) onset.append(float(row[2])) if len(row) > 3: duration.append(float(row[3])) if len(row) > 4: amplitude.append(row[4]) paradigm_info = [np.array(sess), np.array(cid), np.array(onset), np.array(duration), np.array(amplitude)] paradigm_info = paradigm_info[:len(row)] def read_session(paradigm_info, session): """ return a paradigm instance corresponding to session """ ps = (paradigm_info[0] == session) if np.sum(ps) == 0: return None ampli = np.ones(np.sum(ps)) if len(paradigm_info) > 4: _, cid, onset, duration, ampli = (lp[ps] for lp in paradigm_info) if (duration == 0).all(): paradigm = EventRelatedParadigm(cid, onset, ampli) else: paradigm = BlockParadigm(cid, onset, duration, ampli) elif len(paradigm_info) > 3: _, cid, onset, duration = (lp[ps] for lp in paradigm_info) paradigm = BlockParadigm(cid, onset, duration, ampli) else: _, cid, onset = (lp[ps] for lp in paradigm_info) paradigm = EventRelatedParadigm(cid, onset, ampli) return paradigm sessions = np.unique(paradigm_info[0]) if session is None: paradigm = {} for session in sessions: paradigm[session] = read_session(paradigm_info, session) else: paradigm = read_session(paradigm_info, session) return paradigm nipy-0.6.1/nipy/modalities/fmri/fmri.py000066400000000000000000000115331470056100100200470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ...core.api import ImageList class FmriImageList(ImageList): """ Class to implement image list interface for FMRI time series Allows metadata such as volume and slice times """ def __init__(self, images=None, volume_start_times=None, slice_times=None): """ An implementation of an fMRI image as in ImageList Parameters ---------- images : iterable an iterable object whose items are meant to be images; this is checked by asserting that each has a `coordmap` attribute and a ``get_fdata`` method. Note that Image objects are not iterable by default; use the ``from_image`` classmethod or ``iter_axis`` function to convert images to image lists - see examples below for the latter. volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float64)`` slice_times: None or (N,) ndarray specifying offset for each slice of each frame, from the frame start time. See Also -------- nipy.core.image_list.ImageList Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> from nipy.core.api import iter_axis >>> funcim = load_image(funcfile) >>> iterable_img = iter_axis(funcim, 't') >>> fmrilist = FmriImageList(iterable_img) >>> print(fmrilist.get_list_data(axis=0).shape) (20, 17, 21, 3) >>> print(fmrilist[4].shape) (17, 21, 3) """ ImageList.__init__(self, images=images) if volume_start_times is None: volume_start_times = 1. 
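# A scalar here is taken to be the TR; the shape check below either # keeps an explicit per-volume array of start times or expands the # scalar into evenly spaced start times.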
v = np.asarray(volume_start_times) length = len(self.list) if v.shape == (length,): self.volume_start_times = volume_start_times else: v = float(volume_start_times) self.volume_start_times = np.arange(length) * v self.slice_times = slice_times def __getitem__(self, index): """ If index is an index, return self.list[index], an Image else return an FmriImageList with images=self.list[index]. """ if type(index) is int: return self.list[index] return self.__class__( images=self.list[index], volume_start_times=self.volume_start_times[index], slice_times=self.slice_times) @classmethod def from_image(klass, fourdimage, axis='t', volume_start_times=None, slice_times=None): """Create an FmriImageList from a 4D Image Get images by extracting 3d images along the 't' axis. Parameters ---------- fourdimage : ``Image`` instance A 4D Image volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float64)`` slice_times: None or (N,) ndarray specifying offset for each slice of each frame, from the frame start time. Returns ------- filist : ``FmriImageList`` instance """ if fourdimage.ndim != 4: raise ValueError('expecting a 4-dimensional Image') image_list = ImageList.from_image(fourdimage, axis) return klass(images=image_list.list, volume_start_times=volume_start_times, slice_times=slice_times) def axis0_generator(data, slicers=None): """ Takes array-like `data`, returning slices over axes > 0 This function takes an array-like object `data` and yields tuples of slicing thing and slices like:: [slicer, np.asarray(data)[:,slicer] for slicer in slicer] which in the default (`slicers` is None) case, boils down to:: [i, np.asarray(data)[:,i] for i in range(data.shape[1])] This can be used to get arrays of time series out of an array if the time axis is axis 0. Parameters ---------- data : array-like object such that ``arr = np.asarray(data)`` returns an array of at least 2 dimensions. slicers : None or sequence sequence of objects that can be used to slice into array ``arr`` returned from data. If None, default is ``range(data.shape[1])`` """ arr = np.asarray(data) if slicers is None: slicers = range(arr.shape[1]) for slicer in slicers: yield slicer, arr[:,slicer] nipy-0.6.1/nipy/modalities/fmri/fmristat/000077500000000000000000000000001470056100100203665ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/fmri/fmristat/__init__.py000066400000000000000000000004171470056100100225010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module is meant to reproduce the GLM analysis of fmristat. Liao et al. (2002). 
TODO fix reference here """ __docformat__ = 'restructuredtext' nipy-0.6.1/nipy/modalities/fmri/fmristat/api.py000066400000000000000000000002661470056100100215150ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .hrf import canonical as delay_hrf from .model import AR1, OLS nipy-0.6.1/nipy/modalities/fmri/fmristat/hrf.py000066400000000000000000000157031470056100100215250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Computation of the canonical HRF used in fMRIstat, both the 2-term spectral approximation and the Taylor series approximation, to a shifted version of the canonical Glover HRF. References ---------- Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H., Evans, A.C. (2002). \'Estimating the delay of the response in fMRI data.\' NeuroImage, 16:593-606. """ import numpy as np import numpy.linalg as npl from sympy.utilities.lambdify import implemented_function from .. import hrf from ..utils import Interp1dNumeric, T, lambdify_t from .invert import invertR def spectral_decomposition(hrf2decompose, time=None, delta=None, ncomp=2): """ PCA decomposition of symbolic HRF shifted over time Perform a PCA expansion of a symbolic HRF, time shifted over the values in delta, returning the first ncomp components. This smooths out the HRF as compared to using a Taylor series approximation. Parameters ---------- hrf2decompose : sympy expression An expression that can be lambdified as a function of 't'. This is the HRF to be expanded in PCA time : None or np.ndarray, optional None gives default value of np.linspace(-15,50,3251) chosen to match fMRIstat implementation. This corresponds to a time interval of 0.02. Presumed to be equally spaced. delta : None or np.ndarray, optional None results in default value of np.arange(-4.5, 4.6, 0.1) chosen to match fMRIstat implementation. ncomp : int, optional Number of principal components to retain. Returns ------- hrf : [sympy expressions] A sequence length `ncomp` of symbolic HRFs that are the principal components. approx : TODO """ if time is None: time = np.linspace(-15,50,3251) dt = time[1] - time[0] if delta is None: delta = np.arange(-4.5, 4.6, 0.1) # make numerical implementation from hrf function and symbol t. # hrft returns function values when called with values for time as # input. hrft = lambdify_t(hrf2decompose(T)) # Create stack of time-shifted HRFs. Time varies over row, delta # over column. ts_hrf_vals = np.array([hrft(time - d) for d in delta]).T ts_hrf_vals = np.nan_to_num(ts_hrf_vals) # PCA U, S, V = npl.svd(ts_hrf_vals, full_matrices=0) # make interpolators from the generated bases basis = [] for i in range(ncomp): b = Interp1dNumeric(time, U[:, i], bounds_error=False, fill_value=0.) # normalize components with integral of abs of first component if i == 0: d = np.fabs((b(time) * dt).sum()) b.y /= d basis.append(b) # reconstruct time courses for all bases W = np.array([b(time) for b in basis]).T # regress basis time courses against original time shifted time # courses, ncomps by len(delta) parameter matrix WH = np.dot(npl.pinv(W), ts_hrf_vals) # put these into interpolators to get estimated coefficients for any # value of delta coef = [Interp1dNumeric(delta, w, bounds_error=False, fill_value=0.) for w in WH] # swap sign of first component to match that of input HRF. 
Swap # other components if we swap the first, to standardize signs of # components across SVD implementations. if coef[0](0) < 0: # coefficient at time shift of 0 for i in range(ncomp): coef[i].y *= -1. basis[i].y *= -1. def approx(time, delta): value = 0 for i in range(ncomp): value += coef[i](delta) * basis[i](time) return value approx.coef = coef approx.components = basis (approx.theta, approx.inverse, approx.dinverse, approx.forward, approx.dforward) = invertR(delta, approx.coef) # construct aliased functions from bases symbasis = [] for i, b in enumerate(basis): symbasis.append( implemented_function('%s%d' % (str(hrf2decompose), i), b)) return symbasis, approx def taylor_approx(hrf2decompose, time=None, delta=None): """ A Taylor series approximation of an HRF shifted by times `delta` Returns original HRF and gradient of HRF Parameters ---------- hrf2decompose : sympy expression An expression that can be lambdified as a function of 't'. This is the HRF to be expanded in PCA time : None or np.ndarray, optional None gives default value of np.linspace(-15,50,3251) chosen to match fMRIstat implementation. This corresponds to a time interval of 0.02. Presumed to be equally spaced. delta : None or np.ndarray, optional None results in default value of np.arange(-4.5, 4.6, 0.1) chosen to match fMRIstat implementation. Returns ------- hrf : [sympy expressions] Sequence length 2 comprising (`hrf2decompose`, ``dhrf``) where ``dhrf`` is the first derivative of `hrf2decompose`. approx : TODO References ---------- Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H., Evans, A.C. (2002). \'Estimating the delay of the response in fMRI data.\' NeuroImage, 16:593-606. """ if time is None: time = np.linspace(-15,50,3251) dt = time[1] - time[0] if delta is None: delta = np.arange(-4.5, 4.6, 0.1) # make numerical implementation from hrf function and symbol t. # hrft returns function values when called with values for time as # input. hrft = lambdify_t(hrf2decompose(T)) # interpolator for negative gradient of hrf dhrft = Interp1dNumeric(time, -np.gradient(hrft(time), dt), bounds_error=False, fill_value=0.) dhrft.y *= 2 # Create stack of time-shifted HRFs. Time varies over row, delta # over column. ts_hrf_vals = np.array([hrft(time - d) for d in delta]).T # hrf, dhrf W = np.array([hrft(time), dhrft(time)]).T # regress hrf, dhrf at times against stack of time-shifted hrfs WH = np.dot(npl.pinv(W), ts_hrf_vals) # put these into interpolators to get estimated coefficients for any # value of delta coef = [Interp1dNumeric(delta, w, bounds_error=False, fill_value=0.) 
for w in WH] def approx(time, delta): value = (coef[0](delta) * hrft(time) + coef[1](delta) * dhrft(time)) return value approx.coef = coef approx.components = [hrft, dhrft] (approx.theta, approx.inverse, approx.dinverse, approx.forward, approx.dforward) = invertR(delta, approx.coef) dhrf = implemented_function(f'd{hrf2decompose}', dhrft) return [hrf2decompose, dhrf], approx canonical, canonical_approx = taylor_approx(hrf.glover) spectral, spectral_approx = spectral_decomposition(hrf.glover) nipy-0.6.1/nipy/modalities/fmri/fmristat/invert.py000066400000000000000000000036371470056100100222600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __docformat__ = 'restructuredtext' import numpy as np from nipy.algorithms.statistics.models.nlsmodel import NLSModel def invertR(delta, IRF, niter=20): """ If IRF has 2 components (w0, w1) return an estimate of the inverse of r=w1/w0, as in Liao et al. (2002). Fits a simple arctan model to the ratio w1/w0. """ R = IRF[1](delta) / IRF[0](delta) def f(x, theta): a, b, c = theta _x = x[:,0] return a * np.arctan(b * _x) + c def grad(x, theta): a, b, c = theta value = np.zeros((3, x.shape[0])) _x = x[:,0] value[0] = np.arctan(b * _x) value[1] = a / (1. + np.power((b * _x), 2.)) * _x value[2] = 1. return value.T c = delta.max() / (np.pi/2) n = delta.shape[0] delta0 = ((delta[n // 2 + 2] - delta[n // 2 + 1]) / (R[n // 2 + 2] - R[n // 2 + 1])) if delta0 < 0: c = (delta.max() / (np.pi/2)) * 1.2 else: c = -(delta.max() / (np.pi/2)) * 1.2 design = R.reshape(R.shape[0], 1) model = NLSModel(Y=delta, design=design, f=f, grad=grad, theta=np.array([4., 0.5, 0]), niter=niter) for iteration in model: next(model) a, b, c = model.theta def _deltahat(r): return a * np.arctan(b * r) + c def _ddeltahat(r): return a * b / (1 + (b * r)**2) def _deltahatinv(d): return np.tan((d - c) / a) / b def _ddeltahatinv(d): return 1. / (a * b * np.cos((d - c) / a)**2) for fn in [_deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv]: setattr(fn, 'a', a) setattr(fn, 'b', b) setattr(fn, 'c', c) return model.theta, _deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv nipy-0.6.1/nipy/modalities/fmri/fmristat/model.py000066400000000000000000000410371470056100100220450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module defines the two default GLM passes of fmristat The results of both passes of the GLM get pushed around by generators, which know how to get out the (probably 3D) data for each slice, or parcel (for the AR) case, estimate in 2D, then store the data back again in its original shape. The containers here, in the execute methods, know how to reshape the data on the way into the estimation (to 2D), then back again, to 3D, or 4D. It's relatively easy to do this when just iterating over simple slices, but it gets a bit more complicated when taking arbitrary shaped samples from the image, as we do for estimating the AR coefficients, where we take all the voxels with similar AR coefficients at once. 
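In outline, and only as a sketch, a two-pass analysis with the classes
below looks something like this (``fmri_image``, ``formula``,
``contrast`` and the output filenames are assumed to have been built
elsewhere; ``load_image`` is ``nipy.io.api.load_image``)::

    # First pass: OLS fit, writing the AR(1) coefficient image.
    ols = OLS(fmri_image, formula,
              outputs=[output_AR1('rho.nii', fmri_image, clobber=True)])
    ols.execute()
    # Second pass: AR(1) fit over parcels of similar rho value.
    rho = load_image('rho.nii')
    ar = AR1(fmri_image, formula, rho,
             outputs=[output_F('F.nii', contrast, fmri_image,
                               clobber=True)])
    ar.execute()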
""" import copy from os import path import numpy as np import numpy.linalg as npl from nipy.algorithms.statistics.formula import make_recarray from nipy.algorithms.statistics.models.regression import ( ARModel, OLSModel, ar_bias_correct, ar_bias_corrector, ) # nipy core imports from nipy.core.api import AffineTransform, Image, matrix_generator, parcels # nipy IO imports from nipy.io.api import save_image # fmri imports from ..api import FmriImageList, axis0_generator from . import outputters class ModelOutputImage: """ These images have their values filled in as the model is fit, and are saved to disk after being completely filled in. They are saved to disk by calling the 'save' method. The __getitem__ and __setitem__ calls are delegated to a private Image. An exception is raised if trying to get/set data after the data has been saved to disk. """ def __init__(self, filename, coordmap, shape, clobber=False): self.filename = filename self._im_data = np.zeros(shape) self._im = Image(self._im_data, coordmap) # Using a dangerous undocumented API here self.clobber = clobber self._flushed = False def save(self): """ Save current Image data to disk """ if not self.clobber and path.exists(self.filename): raise ValueError('trying to clobber existing file') save_image(self._im, self.filename) self._flushed = True del(self._im) def __getitem__(self, item): if self._flushed: raise ValueError('trying to read value from a ' 'saved ModelOutputImage') return self._im_data[item] def __setitem__(self, item, value): if self._flushed: raise ValueError('trying to set value on saved' 'ModelOutputImage') self._im_data[item] = value def model_generator(formula, data, volume_start_times, iterable=None, slicetimes=None, model_type=OLSModel, model_params = lambda x: ()): """ Generator for the models for a pass of fmristat analysis. """ volume_start_times = make_recarray(volume_start_times.astype(float), 't') # Generator for slices of the data with time as first axis axis0_gen = axis0_generator(data, slicers=iterable) # Iterate over 2D slices of the data for indexer, indexed_data in matrix_generator(axis0_gen): model_args = model_params(indexer) # model may depend on i # Get the design for these volume start times design = formula.design(volume_start_times, return_float=True) # Make the model from the design rmodel = model_type(design, *model_args) yield indexer, indexed_data, rmodel def results_generator(model_iterable): """ Generator for results from an iterator that returns (index, data, model) tuples. See model_generator. """ for i, d, m in model_iterable: yield i, m.fit(d) class OLS: """ First pass through fmri_image. Parameters ---------- fmri_image : `FmriImageList` or 4D image object returning 4D data from np.asarray, with first (``object[0]``) axis being the independent variable of the model; object[0] returns an object with attribute ``shape``. formula : :class:`nipy.algorithms.statistics.formula.Formula` outputs : list Store for model outputs. volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float64)`` Raises ------ ValueError If `volume_start_times` not specified, and 4D image passed. 
""" def __init__(self, fmri_image, formula, outputs=None, volume_start_times=None): self.fmri_image = fmri_image try: self.data = fmri_image.get_fdata() except AttributeError: self.data = fmri_image.get_list_data(axis=0) self.formula = formula self.outputs = outputs if outputs else [] if volume_start_times is None: volume_start_times = getattr(fmri_image, 'volume_start_times', None) if volume_start_times is None: raise ValueError('Must specify start times for 4D image input') self.volume_start_times = volume_start_times def execute(self): m = model_generator(self.formula, self.data, self.volume_start_times, model_type=OLSModel) r = results_generator(m) def reshape(i, x): if len(x.shape) == 2: if type(i) is int: x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:] if type(i) not in [type([]), type(())]: i = (i,) else: i = tuple(i) i = (slice(None,None,None),) + tuple(i) else: if type(i) is int: x.shape = self.fmri_image[0].shape[1:] return i, x generate_output(self.outputs, r, reshape=reshape) def estimateAR(resid, design, order=1): """ Estimate AR parameters using bias correction from fMRIstat. Parameters ---------- resid: array-like residuals from model model: an OLS model used to estimate residuals Returns ------- output : array shape (order, resid """ invM = ar_bias_corrector(design, npl.pinv(design), order) return ar_bias_correct(resid, order, invM) class AR1(OLS): """ Second pass through fmri_image. Parameters ---------- fmri_image : `FmriImageList` object returning 4D array from ``np.asarray``, having attribute ``volume_start_times`` (if `volume_start_times` is None), and such that ``object[0]`` returns something with attributes ``shape`` formula : :class:`nipy.algorithms.statistics.formula.Formula` rho : ``Image`` image of AR(1) coefficients. Returning data from ``rho.get_fdata()``, and having attribute ``coordmap`` outputs : list Store for model outputs. volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float64)`` Raises ------ ValueError If `volume_start_times` not specified, and 4D image passed. """ def __init__(self, fmri_image, formula, rho, outputs=None, volume_start_times=None): super().__init__(fmri_image, formula, outputs, volume_start_times) # Cleanup rho values, truncate them to a scale of 0.01 g = copy.copy(rho.coordmap) rho = rho.get_fdata() m = np.isnan(rho) r = (np.clip(rho,-1,1) * 100).astype(np.int_) / 100. r[m] = np.inf self.rho = Image(r, g) def execute(self): iterable = parcels(self.rho, exclude=[np.inf]) def model_params(i): return (self.rho.get_fdata()[i].mean(),) # Generates indexer, data, model m = model_generator(self.formula, self.data, self.volume_start_times, iterable=iterable, model_type=ARModel, model_params=model_params) # Generates indexer, data, 2D results r = results_generator(m) def reshape(i, x): """ To write output, arrays have to be reshaped -- this function does the appropriate reshaping for the two passes of fMRIstat. 
These passes are: i) 'slices through the z-axis' ii) 'parcels of approximately constant AR1 coefficient' """ if len(x.shape) == 2: # 2D input matrix if type(i) is int: # integer indexing # reshape to ND (where N is probably 4) x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:] # Convert lists to tuples, put anything else into a tuple if type(i) not in [type([]), type(())]: i = (i,) else: i = tuple(i) # Add : to indexing i = (slice(None,None,None),) + tuple(i) else: # not 2D if type(i) is int: # integer indexing x.shape = self.fmri_image[0].shape[1:] return i, x # Put results pulled from results generator r, into outputs generate_output(self.outputs, r, reshape=reshape) def output_T(outbase, contrast, fmri_image, effect=True, sd=True, t=True, clobber=False): """ Return t contrast regression outputs list for `contrast` Parameters ---------- outbase : string Base filename that will be used to construct a set of files for the TContrast. For example, outbase='output.nii' will result in the following files (assuming defaults for all other params): output_effect.nii, output_sd.nii, output_t.nii contrast : array F contrast matrix fmri_image : ``FmriImageList`` or ``Image`` object such that ``object[0]`` has attributes ``shape`` and ``coordmap`` effect : {True, False}, optional whether to write an effect image sd : {True, False}, optional whether to write a standard deviation image t : {True, False}, optional whether to write a t image clobber : {False, True}, optional whether to overwrite images that exist. Returns ------- reglist : ``RegressionOutputList`` instance Regression output list with selected outputs, where selection is by inputs `effect`, `sd` and `t` Notes ----- Note that this routine uses the corresponding ``output_T`` routine in :mod:`outputters`, but indirectly via the TOutput object. """ def build_filename(label): index = outbase.find('.') return ''.join([outbase[:index], '_', label, outbase[index:]]) if effect: effectim = ModelOutputImage(build_filename('effect'), fmri_image[0].coordmap, fmri_image[0].shape, clobber=clobber) else: effectim = None if sd: sdim = ModelOutputImage(build_filename('sd'), fmri_image[0].coordmap, fmri_image[0].shape, clobber=clobber) else: sdim = None if t: tim = ModelOutputImage(build_filename('t'), fmri_image[0].coordmap,fmri_image[0].shape, clobber=clobber) else: tim = None return outputters.TOutput(contrast, effect=effectim, sd=sdim, t=tim) def output_F(outfile, contrast, fmri_image, clobber=False): ''' output F statistic images Parameters ---------- outfile : str filename for F contrast image contrast : array F contrast matrix fmri_image : ``FmriImageList`` or ``Image`` object such that ``object[0]`` has attributes ``shape`` and ``coordmap`` clobber : bool if True, overwrites previous output; if False, raises error Returns ------- f_reg_out : ``RegressionOutput`` instance Object that can a) be called with a results instance as argument, returning an array, and b) accept the output array for storing, via ``obj[slice_spec] = arr`` type slicing. ''' f = ModelOutputImage(outfile, fmri_image[0].coordmap, fmri_image[0].shape, clobber=clobber) return outputters.RegressionOutput(f, lambda x: outputters.output_F(x, contrast)) def output_AR1(outfile, fmri_image, clobber=False): """ Create an output file of the AR1 parameter from the OLS pass of fmristat. 
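    The image written here is suitable for use as the `rho` input to
    :class:`AR1` for the second pass.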
Parameters ---------- outfile : fmri_image : ``FmriImageList`` or 4D image object such that ``object[0]`` has attributes ``coordmap`` and ``shape`` clobber : bool if True, overwrite previous output Returns ------- regression_output : ``RegressionOutput`` instance """ outim = ModelOutputImage(outfile, fmri_image[0].coordmap, fmri_image[0].shape, clobber=clobber) return outputters.RegressionOutput(outim, outputters.output_AR1) def output_resid(outfile, fmri_image, clobber=False): """ Create an output file of the residuals parameter from the OLS pass of fmristat. Uses affine part of the first image to output resids unless fmri_image is an Image. Parameters ---------- outfile : fmri_image : ``FmriImageList`` or 4D image If ``FmriImageList``, needs attributes ``volume_start_times``, supports len(), and object[0] has attributes ``affine``, ``coordmap`` and ``shape``, from which we create a new 4D coordmap and shape If 4D image, use the images coordmap and shape clobber : bool if True, overwrite previous output Returns ------- regression_output : """ if isinstance(fmri_image, FmriImageList): n = len(fmri_image.list) T = np.zeros((5,5)) g = fmri_image[0].coordmap T[1:,1:] = fmri_image[0].affine T[0,0] = (fmri_image.volume_start_times[1:] - fmri_image.volume_start_times[:-1]).mean() # FIXME: NIFTI specific naming here innames = ["t"] + list(g.function_domain.coord_names) outnames = ["t"] + list(g.function_range.coord_names) cmap = AffineTransform.from_params(innames, outnames, T) shape = (n,) + fmri_image[0].shape elif isinstance(fmri_image, Image): cmap = fmri_image.coordmap shape = fmri_image.shape else: raise ValueError("expecting FmriImageList or 4d Image") outim = ModelOutputImage(outfile, cmap, shape, clobber=clobber) return outputters.RegressionOutput(outim, outputters.output_resid) def generate_output(outputs, iterable, reshape=lambda x, y: (x, y)): """ Write out results of a given output. In the regression setting, results is generally going to be a scipy.stats.models.model.LikelihoodModelResults instance. Parameters ---------- outputs : sequence sequence of output objects iterable : object Object which iterates, returning tuples of (indexer, results), where ``indexer`` can be used to index into the `outputs` reshape : callable accepts two arguments, first is the indexer, and the second is the array which will be indexed; returns modified indexer and array ready for slicing with modified indexer. """ for indexer, results in iterable: for output in outputs: # Might be regression output object if not hasattr(output, "list"): # lame test here k, d = reshape(indexer, output(results)) output[k] = d else: # or a regression output list (like a TOutput, with several # images to output to) r = output(results) for j, l in enumerate(output.list): k, d = reshape(indexer, r[j]) l[k] = d # flush outputs, if necessary for output in outputs: if isinstance(output, outputters.RegressionOutput): if hasattr(output.img, 'save'): output.img.save() elif isinstance(output, outputters.RegressionOutputList): for im in output.list: if hasattr(im, 'save'): im.save() nipy-0.6.1/nipy/modalities/fmri/fmristat/outputters.py000066400000000000000000000111771470056100100232050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Convenience functions and classes for statistics on images. These functions and classes support the return of statistical test results from iterations through data. 
The basic container here is the RegressionOutput. This does two basic things: * via __call__, processes a result object from a regression to produce something, usually an array * via slicing (__setitem__), it can store stuff, usually arrays. We use these by other objects (see algorithms.statistics.fmri.fmristat) slicing data out of images, fitting models to the data to create results objects, and then passing them to these here ``RegressionOutput`` containers via call, to get useful arrays, and then putting the results back into the ``RegressionOutput`` containers via slicing (__setitem__). """ __docformat__ = 'restructuredtext' import numpy as np def output_T(results, contrast, retvals=('effect', 'sd', 't')): """ Convenience function to collect t contrast results Parameters ---------- results : object implementing Tcontrast method contrast : array contrast matrix retvals : sequence, optional None or more of strings 'effect', 'sd', 't', where the presence of the string means that that output will be returned. Returns ------- res_list : list List of results. It will have the same length as `retvals` and the elements will be in the same order as retvals """ r = results.Tcontrast(contrast, store=retvals) returns = [] for valname in retvals: if valname == 'effect': returns.append(r.effect) if valname == 'sd': returns.append(r.sd) if valname == 't': returns.append(r.t) return returns def output_F(results, contrast): """ This convenience function outputs the results of an Fcontrast from a regression Parameters ---------- results : object implementing Tcontrast method contrast : array contrast matrix Returns ------- F : array array of F values """ return results.Fcontrast(contrast).F def output_resid(results): """ This convenience function outputs the residuals from a regression """ return results.resid class RegressionOutput: """ A class to output things in GLM passes through arrays of data. """ def __init__(self, img, fn, output_shape=None): """ Parameters ---------- img : ``Image`` instance The output Image fn : callable A function that is applied to a models.model.LikelihoodModelResults instance """ self.img = img self.fn = fn self.output_shape = output_shape def __call__(self, x): return self.fn(x) def __setitem__(self, index, value): self.img[index] = value class RegressionOutputList: """ A class to output more than one thing from a GLM pass through arrays of data. """ def __call__(self, x): return self.fn(x) def __init__(self, imgs, fn): """ Initialize regression output list Parameters ---------- imgs : list The list of output images fn : callable A function that is applied to a models.model.LikelihoodModelResults instance """ self.list = imgs self.fn = fn def __setitem__(self, index, value): self.list[index[0]][index[1:]] = value class TOutput(RegressionOutputList): """ Output contrast related to a T contrast from a GLM pass through data. """ def __init__(self, contrast, effect=None, sd=None, t=None): # Returns a list of arrays, being [effect, sd, t] when all these are not # None # Compile list of desired return values retvals = [] # Set self.list to contain selected input catching objects self.list = [] if effect is not None: retvals.append('effect') self.list.append(effect) if sd is not None: retvals.append('sd') self.list.append(sd) if t is not None: retvals.append('t') self.list.append(t) # Set return function to return selected inputs self.fn = lambda x: output_T(x, contrast, retvals) def output_AR1(results): """ Compute the usual AR(1) parameter on the residuals from a regression. 
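    In terms of the residual array ``resid`` (time on the first axis),
    this is the lag-1 autocorrelation
    ``sum(resid[:-1] * resid[1:]) / sum(resid[1:-1] ** 2)``; following
    fMRIstat, the denominator sums squares over the interior time
    points only.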
""" resid = results.resid rho = np.add.reduce(resid[0:-1]*resid[1:] / np.add.reduce(resid[1:-1]**2)) return rho nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/000077500000000000000000000000001470056100100215305ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/FIACdesigns.py000066400000000000000000004625131470056100100241740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np # number of scans in design (rows in design matrix) N_ROWS = 191 # This is the alternative design to the standard, here called # 'description' # subj3_evt_fonc1.txt altdescr = {'block':'''time,sentence,speaker 2.0,SSt,SSp 5.33,SSt,SSp 8.67,SSt,SSp 12.0,SSt,SSp 15.33,SSt,SSp 18.67,SSt,SSp 31.0,SSt,SSp 34.33,SSt,SSp 37.67,SSt,SSp 41.0,SSt,SSp 44.33,SSt,SSp 47.67,SSt,SSp 60.0,SSt,DSp 63.33,SSt,DSp 66.67,SSt,DSp 70.0,SSt,DSp 73.33,SSt,DSp 76.67,SSt,DSp 89.0,DSt,SSp 92.33,DSt,SSp 95.67,DSt,SSp 99.0,DSt,SSp 102.33,DSt,SSp 105.67,DSt,SSp 118.0,DSt,DSp 121.33,DSt,DSp 124.67,DSt,DSp 128.0,DSt,DSp 131.33,DSt,DSp 134.67,DSt,DSp 147.0,DSt,DSp 150.33,DSt,DSp 153.67,DSt,DSp 157.0,DSt,DSp 160.33,DSt,DSp 163.67,DSt,DSp 176.0,DSt,SSp 179.33,DSt,SSp 182.67,DSt,SSp 186.0,DSt,SSp 189.33,DSt,SSp 192.67,DSt,SSp 205.0,SSt,DSp 208.33,SSt,DSp 211.67,SSt,DSp 215.0,SSt,DSp 218.33,SSt,DSp 221.67,SSt,DSp 234.0,SSt,DSp 237.33,SSt,DSp 240.67,SSt,DSp 244.0,SSt,DSp 247.33,SSt,DSp 250.67,SSt,DSp 263.0,DSt,SSp 266.33,DSt,SSp 269.67,DSt,SSp 273.0,DSt,SSp 276.33,DSt,SSp 279.67,DSt,SSp 292.0,SSt,SSp 295.33,SSt,SSp 298.67,SSt,SSp 302.0,SSt,SSp 305.33,SSt,SSp 308.67,SSt,SSp 321.0,DSt,DSp 324.33,DSt,DSp 327.67,DSt,DSp 331.0,DSt,DSp 334.33,DSt,DSp 337.67,DSt,DSp 350.0,SSt,SSp 353.33,SSt,SSp 356.67,SSt,SSp 360.0,SSt,SSp 363.33,SSt,SSp 366.67,SSt,SSp 379.0,DSt,SSp 382.33,DSt,SSp 385.67,DSt,SSp 389.0,DSt,SSp 392.33,DSt,SSp 395.67,DSt,SSp 408.0,SSt,DSp 411.33,SSt,DSp 414.67,SSt,DSp 418.0,SSt,DSp 421.33,SSt,DSp 424.67,SSt,DSp 437.0,DSt,DSp 440.33,DSt,DSp 443.67,DSt,DSp 447.0,DSt,DSp 450.33,DSt,DSp 453.67,DSt,DSp''', 'event':'''time,sentence,speaker 2.0,DSt,DSp 5.33,SSt,SSp 8.67,DSt,SSp 12.0,DSt,DSp 15.33,SSt,DSp 18.67,DSt,SSp 22.0,SSt,SSp 25.33,DSt,SSp 28.67,SSt,SSp 32.0,SSt,DSp 35.33,SSt,SSp 38.67,SSt,DSp 42.0,DSt,DSp 45.33,SSt,SSp 48.67,DSt,SSp 52.0,SSt,DSp 55.33,SSt,SSp 58.67,DSt,SSp 62.0,DSt,DSp 65.33,SSt,SSp 68.67,DSt,SSp 72.0,SSt,SSp 75.33,SSt,DSp 78.67,DSt,DSp 82.0,SSt,SSp 85.33,DSt,DSp 88.67,SSt,SSp 92.0,DSt,DSp 95.33,DSt,SSp 98.67,DSt,DSp 102.0,DSt,SSp 105.33,SSt,SSp 108.67,SSt,DSp 112.0,DSt,DSp 115.33,SSt,DSp 118.67,SSt,SSp 122.0,DSt,SSp 125.33,DSt,DSp 128.67,DSt,SSp 132.0,DSt,DSp 135.33,DSt,SSp 138.67,SSt,DSp 142.0,DSt,DSp 145.33,SSt,SSp 148.67,SSt,DSp 152.0,SSt,SSp 155.33,SSt,DSp 158.67,SSt,SSp 162.0,SSt,DSp 165.33,DSt,SSp 168.67,SSt,SSp 172.0,DSt,SSp 175.33,DSt,DSp 178.67,SSt,SSp 182.0,DSt,DSp 185.33,SSt,DSp 188.67,SSt,SSp 192.0,SSt,DSp 195.33,SSt,SSp 198.67,DSt,DSp 202.0,SSt,DSp 205.33,DSt,SSp 208.67,DSt,DSp 212.0,SSt,SSp 215.33,SSt,DSp 218.67,DSt,SSp 222.0,SSt,SSp 225.33,DSt,DSp 228.67,SSt,SSp 232.0,DSt,DSp 235.33,DSt,SSp 238.67,DSt,DSp 242.0,SSt,SSp 245.33,DSt,DSp 248.67,SSt,DSp 252.0,DSt,SSp 255.33,DSt,DSp 258.67,SSt,DSp 262.0,DSt,DSp 265.33,DSt,SSp 268.67,DSt,DSp 272.0,DSt,SSp 275.33,DSt,DSp 278.67,SSt,SSp 282.0,DSt,SSp 285.33,SSt,DSp 288.67,DSt,DSp 292.0,DSt,SSp 295.33,SSt,DSp 298.67,DSt,DSp 302.0,DSt,SSp 305.33,SSt,DSp 308.67,DSt,SSp 312.0,SSt,DSp 315.33,SSt,SSp 318.67,DSt,SSp 322.0,SSt,SSp 325.33,DSt,DSp 
328.67,SSt,SSp 332.0,SSt,DSp 335.33,SSt,SSp 338.67,DSt,SSp 342.0,DSt,DSp 345.33,SSt,DSp 348.67,DSt,DSp 352.0,SSt,DSp 355.33,SSt,SSp 358.67,DSt,DSp 362.0,SSt,DSp 365.33,DSt,SSp 368.67,SSt,DSp 372.0,DSt,SSp 375.33,DSt,DSp 378.67,DSt,SSp 382.0,SSt,SSp 385.33,SSt,DSp 388.67,DSt,SSp 392.0,SSt,SSp 395.33,SSt,DSp 398.67,DSt,SSp 402.0,DSt,DSp 405.33,DSt,SSp 408.67,SSt,SSp 412.0,DSt,SSp 415.33,SSt,SSp 418.67,DSt,DSp 422.0,DSt,SSp 425.33,SSt,DSp 428.67,SSt,SSp 432.0,DSt,DSp 435.33,SSt,DSp 438.67,SSt,SSp 442.0,DSt,DSp 445.33,SSt,DSp 448.67,DSt,SSp 452.0,DSt,DSp 455.33,SSt,DSp 458.67,DSt,SSp 462.0,SSt,SSp 465.33,DSt,DSp 468.67,SSt,SSp'''} # convert altdescr to recarray for convenience converters = float, str, str for key, value in altdescr.items(): lines = value.split('\n') names = lines.pop(0).strip().split(',') dtype = np.dtype(list(zip(names, ('f8', 'S3', 'S3')))) rec = np.recarray(shape=(len(lines),), dtype=dtype) for i, line in enumerate(lines): vals = line.strip().split(',') for name, val, conv in zip(names, vals, converters): rec[i][name] = conv(val) altdescr[key] = rec # standard analysis onsets event_dict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'} descriptions = {'event':""" 2.00 4 5.33 1 8.67 3 12.00 4 15.33 2 18.67 3 22.00 1 25.33 3 28.67 1 32.00 2 35.33 1 38.67 2 42.00 4 45.33 1 48.67 3 52.00 2 55.33 1 58.67 3 62.00 4 65.33 1 68.67 3 72.00 1 75.33 2 78.67 4 82.00 1 85.33 4 88.67 1 92.00 4 95.33 3 98.67 4 102.00 3 105.33 1 108.67 2 112.00 4 115.33 2 118.67 1 122.00 3 125.33 4 128.67 3 132.00 4 135.33 3 138.67 2 142.00 4 145.33 1 148.67 2 152.00 1 155.33 2 158.67 1 162.00 2 165.33 3 168.67 1 172.00 3 175.33 4 178.67 1 182.00 4 185.33 2 188.67 1 192.00 2 195.33 1 198.67 4 202.00 2 205.33 3 208.67 4 212.00 1 215.33 2 218.67 3 222.00 1 225.33 4 228.67 1 232.00 4 235.33 3 238.67 4 242.00 1 245.33 4 248.67 2 252.00 3 255.33 4 258.67 2 262.00 4 265.33 3 268.67 4 272.00 3 275.33 4 278.67 1 282.00 3 285.33 2 288.67 4 292.00 3 295.33 2 298.67 4 302.00 3 305.33 2 308.67 3 312.00 2 315.33 1 318.67 3 322.00 1 325.33 4 328.67 1 332.00 2 335.33 1 338.67 3 342.00 4 345.33 2 348.67 4 352.00 2 355.33 1 358.67 4 362.00 2 365.33 3 368.67 2 372.00 3 375.33 4 378.67 3 382.00 1 385.33 2 388.67 3 392.00 1 395.33 2 398.67 3 402.00 4 405.33 3 408.67 1 412.00 3 415.33 1 418.67 4 422.00 3 425.33 2 428.67 1 432.00 4 435.33 2 438.67 1 442.00 4 445.33 2 448.67 3 452.00 4 455.33 2 458.67 3 462.00 1 465.33 4 468.67 1 """, # subj3_bloc_fonc3.txt 'block':""" 2.00 1 5.33 1 8.67 1 12.00 1 15.33 1 18.67 1 31.00 1 34.33 1 37.67 1 41.00 1 44.33 1 47.67 1 60.00 2 63.33 2 66.67 2 70.00 2 73.33 2 76.67 2 89.00 3 92.33 3 95.67 3 99.00 3 102.33 3 105.67 3 118.00 4 121.33 4 124.67 4 128.00 4 131.33 4 134.67 4 147.00 4 150.33 4 153.67 4 157.00 4 160.33 4 163.67 4 176.00 3 179.33 3 182.67 3 186.00 3 189.33 3 192.67 3 205.00 2 208.33 2 211.67 2 215.00 2 218.33 2 221.67 2 234.00 2 237.33 2 240.67 2 244.00 2 247.33 2 250.67 2 263.00 3 266.33 3 269.67 3 273.00 3 276.33 3 279.67 3 292.00 1 295.33 1 298.67 1 302.00 1 305.33 1 308.67 1 321.00 4 324.33 4 327.67 4 331.00 4 334.33 4 337.67 4 350.00 1 353.33 1 356.67 1 360.00 1 363.33 1 366.67 1 379.00 3 382.33 3 385.67 3 389.00 3 392.33 3 395.67 3 408.00 2 411.33 2 414.67 2 418.00 2 421.33 2 424.67 2 437.00 4 440.33 4 443.67 4 447.00 4 450.33 4 453.67 4 """} # convert to record array for convenience dtype = np.dtype([('time', np.float64), ('event', 'S7')]) for key, txt in descriptions.items(): vals = np.fromstring(txt, sep='\t').reshape(-1, 2) full_def = np.zeros((vals.shape[0],), 
dtype=dtype) for i, row in enumerate(vals): full_def[i]['time' ] = row[0] full_def[i]['event'] = event_dict[row[1]] descriptions[key] = full_def # fmristat designs, probably saved from matlab to ascii fmristat = {'block': """ 0.0000000000000000e+00 1.7972165294585549e-07 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -7.0891758549395515e-07 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -1.0000000000000000e+00 1.0000000000000000e+00 -1.0000000000000000e+00 -0.0000000000000000e+00 -3.9964378119605467e+07 2.4177346180074877e-02 5.9438222395961854e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.7549188213126270e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.8947368421052628e-01 9.7905817174515231e-01 -9.6875229625309800e-01 -0.0000000000000000e+00 -3.9964295632754065e+07 2.9724288489710671e-01 6.7859854151592250e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.2825364543031706e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.7894736842105268e-01 9.5833795013850420e-01 -9.3816241434611469e-01 -0.0000000000000000e+00 -3.9964008717195466e+07 2.5803044610535053e-01 2.0702131854989700e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.2538113774670057e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.6842105263157896e-01 9.3783933518005547e-01 -9.0822335617436944e-01 -0.0000000000000000e+00 -3.9963382251577556e+07 1.5480779827902719e-02 3.5555300272627366e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.5615934487315666e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.5789473684210524e-01 9.1756232686980610e-01 -8.7892812363318262e-01 -0.0000000000000000e+00 -3.9962264238541424e+07 -8.5092843112443153e-02 4.1993036162424330e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.0730035819227872e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.4736842105263153e-01 8.9750692520775610e-01 -8.5026971861787415e-01 -0.0000000000000000e+00 -3.9960523102045782e+07 -6.7344467282778389e-02 3.9648730685263067e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.4795239339487262e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.3684210526315792e-01 8.7767313019390591e-01 -8.2224114302376450e-01 -0.0000000000000000e+00 -3.9958030461220443e+07 -3.0859121798647051e-02 3.4988988278635952e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 5.1950746816847776e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.2631578947368420e-01 8.5806094182825488e-01 -7.9483539874617293e-01 -0.0000000000000000e+00 -3.9954647886381753e+07 -1.0398603799754021e-02 3.0094602992319508e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.9200680125590358e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.1578947368421049e-01 8.3867036011080320e-01 -7.6804548768041980e-01 -0.0000000000000000e+00 -3.9950228902413361e+07 -2.8261891016884526e-03 1.9842544809308393e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.7957629818392778e-01 0.0000000000000000e+00 
0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.0526315789473688e-01 8.1950138504155134e-01 -7.4186441172182538e-01 -0.0000000000000000e+00 -3.9944630184490673e+07 -6.5240578331303358e-04 4.1333332231519290e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 2.2566882506564701e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.9473684210526316e-01 8.0055401662049863e-01 -7.1628517276570935e-01 -0.0000000000000000e+00 -3.9937731831733234e+07 -1.3230474301884534e-04 -8.9013361783352618e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0200288654363485e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.8421052631578945e-01 7.8182825484764540e-01 -6.9130077270739165e-01 -0.0000000000000000e+00 -3.9929402377448291e+07 -2.2763919031965966e-05 -1.1818776925247122e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -3.3071814284000439e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.7368421052631584e-01 7.6332409972299176e-01 -6.6690421344219286e-01 -0.0000000000000000e+00 -3.9919467169778965e+07 1.1766933934135151e-01 -5.9145813637303944e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.1704903409713198e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.6315789473684212e-01 7.4504155124653748e-01 -6.4308849686543235e-01 -0.0000000000000000e+00 -3.9907786847505786e+07 3.3915151685855366e-01 7.8487423349090008e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.2055846003109769e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.5263157894736841e-01 7.2698060941828258e-01 -6.1984662487243036e-01 -0.0000000000000000e+00 -3.9894241441418923e+07 1.5626897220672240e-01 2.5758459445421755e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.3727553274059962e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.4210526315789469e-01 7.0914127423822704e-01 -5.9717159935850694e-01 -0.0000000000000000e+00 -3.9878693250274450e+07 -4.5889761416029337e-02 3.9139980364646210e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -9.7047116711321060e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.3157894736842108e-01 6.9152354570637120e-01 -5.7505642221898245e-01 -0.0000000000000000e+00 -3.9860998616045773e+07 -8.5485466293479639e-02 4.1644987530692945e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.5079498059630163e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.2105263157894737e-01 6.7412742382271473e-01 -5.5349409534917626e-01 -0.0000000000000000e+00 -3.9841029804515697e+07 -5.1613987307140528e-02 3.7656626544155047e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.4995785276060730e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -8.1052631578947365e-01 6.5695290858725752e-01 -5.3247762064440873e-01 -0.0000000000000000e+00 -3.9818654567553706e+07 -2.0586709118519942e-02 3.3320587820969133e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 4.7437405355933771e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 
1.0000000000000000e+00 -8.0000000000000004e-01 6.4000000000000012e-01 -5.1200000000000012e-01 -0.0000000000000000e+00 -3.9793725269769594e+07 -6.3149871015004810e-03 2.6901760355826837e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0881647626783833e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.8947368421052633e-01 6.2326869806094187e-01 -4.9205423531126991e-01 -0.0000000000000000e+00 -3.9766099107882999e+07 -1.5998995910758370e-03 1.3858542822449937e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 2.1519290836378713e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.7894736842105261e-01 6.0675900277008310e-01 -4.7263332847353839e-01 -0.0000000000000000e+00 -3.9735647326953202e+07 -3.4949313513852877e-04 -1.9968098640650647e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.9284674322244838e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.6842105263157889e-01 5.9047091412742370e-01 -4.5373028138212557e-01 -0.0000000000000000e+00 -3.9702236464460857e+07 -6.7774195098072875e-05 -1.1325358183759204e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.7893132438445457e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.5789473684210529e-01 5.7440443213296399e-01 -4.3533809593235173e-01 -0.0000000000000000e+00 -3.9665728276365325e+07 5.6890986261887112e-03 -1.0696208513639913e-01 2.4632390461375082e-03 0.0000000000000000e+00 0.0000000000000000e+00 -5.6615568209656034e-02 -7.8171058742844777e-03 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.4736842105263157e-01 5.5855955678670355e-01 -4.1744977401953637e-01 -0.0000000000000000e+00 -3.9625947171426475e+07 2.4618481081097995e-01 -6.1272673063231448e-02 4.8820311055440317e-02 0.0000000000000000e+00 0.0000000000000000e+00 -5.8086735980862475e-02 -1.0183399555242575e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.3684210526315785e-01 5.4293628808864258e-01 -4.0005831753899979e-01 -0.0000000000000000e+00 -3.9582766803961083e+07 2.9953964554960644e-01 -2.5547441406802145e-02 1.7507033112252593e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.0993666067196276e-02 -2.1612831441918828e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.2631578947368425e-01 5.2753462603878121e-01 -3.8315672838606218e-01 -0.0000000000000000e+00 -3.9536072560850807e+07 5.7109131744450661e-02 -8.4051340693276771e-03 3.2987495754170404e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.1758010932446473e-02 -1.8320362885086547e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.1578947368421053e-01 5.1235457063711909e-01 -3.6673800845604315e-01 -0.0000000000000000e+00 -3.9485728182394587e+07 -7.8184230485700307e-02 -2.2938316718133949e-03 4.1619910711870090e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.5232574314479738e-03 -2.7189893305956774e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -7.0526315789473681e-01 4.9739612188365645e-01 -3.5079515964426300e-01 -0.0000000000000000e+00 -3.9431584623730086e+07 -7.4737010936004322e-02 -5.3798391262850328e-04 4.0522771221040943e-01 0.0000000000000000e+00 0.0000000000000000e+00 -8.8267150904121708e-04 5.8945294122267095e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 
-6.9473684210526321e-01 4.8265927977839340e-01 -3.3532118384604176e-01 -0.0000000000000000e+00 -3.9373503040717266e+07 -3.7125986784173719e-02 -1.1129983958096788e-04 3.5823603012410249e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.9175152461996296e-04 5.6583906662152982e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -6.8421052631578949e-01 4.6814404432132967e-01 -3.2030908295669924e-01 -0.0000000000000000e+00 -3.9311345987254620e+07 -1.3178061904414793e-02 -2.0616437121920034e-05 3.1330364005523526e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.6864262677767610e-05 5.6795794533862881e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -6.7368421052631577e-01 4.5385041551246535e-01 -3.0575185887155559e-01 -0.0000000000000000e+00 -3.9244973542355932e+07 -3.7205065395061786e-03 -3.5244398305267092e-06 2.2498345090173064e-01 0.0000000000000000e+00 0.0000000000000000e+00 -6.4936311517650639e-06 1.5701149313902130e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -6.6315789473684206e-01 4.3977839335180047e-01 -2.9164251348593084e-01 -0.0000000000000000e+00 -3.9174248924560487e+07 -8.8460183856306875e-04 -5.1822314315757625e-07 7.3882604017788261e-02 0.0000000000000000e+00 0.0000000000000000e+00 -9.7632309208494808e-07 2.2974705727631178e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -6.5263157894736845e-01 4.2592797783933523e-01 -2.7797404869514508e-01 -0.0000000000000000e+00 -3.9099039317975588e+07 -1.8370553324891862e-04 0.0000000000000000e+00 -7.0043906737830369e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.3510500812682708e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -6.4210526315789473e-01 4.1229916897506924e-01 -2.6473946639451817e-01 -0.0000000000000000e+00 -3.9019207925871357e+07 -3.4179300052400812e-05 0.0000000000000000e+00 -1.2022600095442171e-01 1.4728700311903286e-05 0.0000000000000000e+00 0.0000000000000000e+00 -1.3519940771543261e-02 -5.5984441974886700e-05 0.0000000000000000e+00 1.0000000000000000e+00 -6.3157894736842102e-01 3.9889196675900274e-01 -2.5193176847937016e-01 -0.0000000000000000e+00 -3.8934600623322882e+07 6.1977227981245921e-02 0.0000000000000000e+00 -8.9600939014954029e-02 1.1919836474499302e-02 0.0000000000000000e+00 0.0000000000000000e+00 -6.4884679517546728e-02 -3.2555493185429440e-02 0.0000000000000000e+00 1.0000000000000000e+00 -6.2105263157894741e-01 3.8570637119113577e-01 -2.3954395684502119e-01 -0.0000000000000000e+00 -3.8845083451900810e+07 3.2914194698808724e-01 0.0000000000000000e+00 -4.4610650244984916e-02 9.0274941429971764e-02 0.0000000000000000e+00 0.0000000000000000e+00 -4.7586436324094594e-02 -1.5426326998121220e-01 0.0000000000000000e+00 1.0000000000000000e+00 -6.1052631578947369e-01 3.7274238227146816e-01 -2.2756903338679108e-01 -0.0000000000000000e+00 -3.8750522818035603e+07 2.0869631953634510e-01 0.0000000000000000e+00 -1.6794634932976529e-02 2.3929665270590575e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.1745661626648261e-02 -2.2651021459888929e-01 0.0000000000000000e+00 1.0000000000000000e+00 -5.9999999999999998e-01 3.5999999999999999e-01 -2.1599999999999997e-01 -0.0000000000000000e+00 -3.8650779152144141e+07 -1.8988164869436275e-02 0.0000000000000000e+00 -5.1018176378398936e-03 3.7764046922164829e-01 0.0000000000000000e+00 0.0000000000000000e+00 -7.4407761527462875e-03 -1.2483558646645473e-01 0.0000000000000000e+00 1.0000000000000000e+00 -5.8947368421052626e-01 
3.4747922437673123e-01 -2.0482985857996788e-01 -0.0000000000000000e+00 -3.8545724148709796e+07 -8.7172921748424712e-02 0.0000000000000000e+00 -1.3055419048209041e-03 4.2010747575335811e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.0641445077479018e-03 2.0039325175152698e-02 0.0000000000000000e+00 1.0000000000000000e+00 -5.7894736842105265e-01 3.3518005540166207e-01 -1.9405161102201490e-01 -0.0000000000000000e+00 -3.8435214400087699e+07 -5.9479346491040953e-02 0.0000000000000000e+00 -2.9049537695812611e-04 3.8676128528650705e-01 0.0000000000000000e+00 0.0000000000000000e+00 -4.8684122664065948e-04 6.6822940192205887e-02 0.0000000000000000e+00 1.0000000000000000e+00 -5.6842105263157894e-01 3.2310249307479222e-01 -1.8365825922146084e-01 -0.0000000000000000e+00 -3.8319113881397702e+07 -2.5344894340844023e-02 0.0000000000000000e+00 -5.7519918573519355e-05 3.4173267838996024e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.0071519927700912e-04 4.8590711578671605e-02 0.0000000000000000e+00 1.0000000000000000e+00 -5.5789473684210522e-01 3.1124653739612185e-01 -1.7364280507362584e-01 -0.0000000000000000e+00 -3.8197280698436156e+07 -8.1360247239318281e-03 0.0000000000000000e+00 -1.0271890012879138e-05 2.8635449602721674e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.8603553893817027e-05 8.6957892817575433e-02 0.0000000000000000e+00 1.0000000000000000e+00 -5.4736842105263162e-01 2.9961218836565101e-01 -1.6399825047383004e-01 -0.0000000000000000e+00 -3.8069578593331799e+07 -2.1329811439999668e-03 0.0000000000000000e+00 -1.5726052562367482e-06 1.6942030072831732e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.9207518307516431e-06 1.9932735803028329e-01 0.0000000000000000e+00 1.0000000000000000e+00 -5.3684210526315790e-01 2.8819944598337949e-01 -1.5471759731739321e-01 -0.0000000000000000e+00 -3.7935869323744483e+07 -4.7868580374672127e-04 0.0000000000000000e+00 -2.4393255686410929e-07 9.7342091717260160e-03 0.0000000000000000e+00 0.0000000000000000e+00 -4.6354368728304449e-07 2.1317803332965357e-01 0.0000000000000000e+00 1.0000000000000000e+00 -5.2631578947368418e-01 2.7700831024930744e-01 -1.4579384749963548e-01 -0.0000000000000000e+00 -3.7796023122095928e+07 -9.4885576906897671e-05 0.0000000000000000e+00 0.0000000000000000e+00 -1.0342703349042752e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.8989839045089055e-02 0.0000000000000000e+00 1.0000000000000000e+00 -5.1578947368421058e-01 2.6603878116343493e-01 -1.3722000291587699e-01 -0.0000000000000000e+00 -3.7649889965446725e+07 4.6549987495046264e-04 0.0000000000000000e+00 0.0000000000000000e+00 -1.1370885399932430e-01 7.7849954623234125e-04 0.0000000000000000e+00 0.0000000000000000e+00 -4.6780737173119882e-02 -2.6406827272240662e-03 1.0000000000000000e+00 -5.0526315789473686e-01 2.5529085872576179e-01 -1.2898906546143754e-01 -0.0000000000000000e+00 -3.7497316265670031e+07 1.8286923577271125e-01 0.0000000000000000e+00 0.0000000000000000e+00 -7.0505792246302060e-02 3.3178956057083007e-02 0.0000000000000000e+00 0.0000000000000000e+00 -6.2077599161047464e-02 -7.6128408846089957e-02 1.0000000000000000e+00 -4.9473684210526314e-01 2.4476454293628808e-01 -1.2109403703163725e-01 -0.0000000000000000e+00 -3.7338169264406279e+07 3.2815365238396688e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.1075379328076348e-02 1.4440180014681392e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.6293517323374769e-02 -1.9992737502479768e-01 1.0000000000000000e+00 -4.8421052631578948e-01 2.3445983379501387e-01 
-1.1352791952179618e-01 -0.0000000000000000e+00 -3.7172326619055934e+07 1.0472734947167857e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.0670392954156069e-02 3.0145227582186795e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.4579521804191179e-02 -2.0462195254207211e-01 1.0000000000000000e+00 -4.7368421052631576e-01 2.2437673130193903e-01 -1.0628371482723427e-01 -0.0000000000000000e+00 -3.6999660033267289e+07 -6.5414735178786182e-02 0.0000000000000000e+00 0.0000000000000000e+00 -3.0133373947628865e-03 4.0816930917890015e-01 0.0000000000000000e+00 0.0000000000000000e+00 -4.5547919780519388e-03 -5.7811109467925295e-02 1.0000000000000000e+00 -4.6315789473684210e-01 2.1451523545706372e-01 -9.9354424843271616e-02 -0.0000000000000000e+00 -3.6820030059284538e+07 -8.1039296440989866e-02 0.0000000000000000e+00 0.0000000000000000e+00 -7.2674473172627254e-04 4.1235355967765930e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.1786336808960263e-03 4.9499274042558168e-02 1.0000000000000000e+00 -4.5263157894736844e-01 2.0487534626038784e-01 -9.2733051465228172e-02 -0.0000000000000000e+00 -3.6633274691983856e+07 -4.4087905632133433e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.5386856254847681e-04 3.6716665813617538e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.6278123722066446e-04 6.1468774353944371e-02 1.0000000000000000e+00 -4.4210526315789472e-01 1.9545706371191135e-01 -8.6412596588423957e-02 -0.0000000000000000e+00 -3.6439258355741180e+07 -1.6550422413122053e-02 0.0000000000000000e+00 0.0000000000000000e+00 -2.9267163684834725e-05 3.2392301620679653e-01 0.0000000000000000e+00 0.0000000000000000e+00 -5.2002150652398645e-05 4.9847666173530675e-02 1.0000000000000000e+00 -4.3157894736842106e-01 1.8626038781163437e-01 -8.0386062108179043e-02 -0.0000000000000000e+00 -3.6237849955159605e+07 -4.8645759144098875e-03 0.0000000000000000e+00 0.0000000000000000e+00 -5.0502559169199467e-06 2.4859549248739221e-01 0.0000000000000000e+00 0.0000000000000000e+00 -9.2540988607474148e-06 1.3286328271100897e-01 1.0000000000000000e+00 -4.2105263157894735e-01 1.7728531855955676e-01 -7.4646449919813368e-02 -0.0000000000000000e+00 -3.6028915291220829e+07 -1.1930031026986410e-03 0.0000000000000000e+00 0.0000000000000000e+00 -7.5235207367785176e-07 1.0653571529948751e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.4109555146948659e-06 2.2584550280153473e-01 1.0000000000000000e+00 -4.1052631578947368e-01 1.6853185595567868e-01 -6.9186761918647033e-02 -0.0000000000000000e+00 -3.5812314029410847e+07 -2.5396202412826985e-04 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -4.6832193791313273e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.6614010275431002e-01 1.0000000000000000e+00 -4.0000000000000002e-01 1.6000000000000003e-01 -6.4000000000000015e-02 -0.0000000000000000e+00 -3.5587902398848765e+07 -4.8220518125413127e-05 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.1871326011304753e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0090386571392081e-02 1.0000000000000000e+00 -3.8947368421052631e-01 1.5168975069252078e-01 -5.9079166059192299e-02 -0.0000000000000000e+00 -3.5355541558413237e+07 2.4169016536933870e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -9.2791475308364513e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -8.0004825585489200e-02 1.0000000000000000e+00 -3.7894736842105264e-01 1.4360110803324100e-01 -5.4417261991543966e-02 
-0.0000000000000000e+00 -3.5115099058204055e+07 2.9724155612227232e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.5270927425803231e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.8135969165363014e-01 1.0000000000000000e+00 -3.6842105263157893e-01 1.3573407202216065e-01 -5.0007289692374973e-02 -0.0000000000000000e+00 -3.4866436158003025e+07 2.5803024806313540e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.8621478092052027e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.5150110948521948e-01 1.0000000000000000e+00 -3.5789473684210527e-01 1.2808864265927977e-01 -4.5842251057005394e-02 -0.0000000000000000e+00 -3.4609428135132596e+07 1.5480751995147656e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.4898126191507456e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.6555402474497252e-01 1.0000000000000000e+00 -3.4736842105263160e-01 1.2066481994459835e-01 -4.1915147980755220e-02 -0.0000000000000000e+00 -3.4343941110639565e+07 -8.5092846828545440e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 4.1819492389675139e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -3.7787005240121178e-03 1.0000000000000000e+00 -3.3684210526315789e-01 1.1346260387811634e-01 -3.8218982358944449e-02 -0.0000000000000000e+00 -3.4069813706143320e+07 -6.7344467282778389e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.9609103273326435e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.4137932796112615e-02 1.0000000000000000e+00 -3.2631578947368423e-01 1.0648199445983381e-01 -3.4746756086893135e-02 -0.0000000000000000e+00 -3.3786908936772466e+07 -3.0859121798647051e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.4980970938405542e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 5.1811469414551475e-02 1.0000000000000000e+00 -3.1578947368421051e-01 9.9722991689750684e-02 -3.1491471059921269e-02 -0.0000000000000000e+00 -3.3495079538397674e+07 -1.0398603799754021e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.0093145263337484e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.9174443414894288e-02 1.0000000000000000e+00 -3.0526315789473685e-01 9.3185595567867041e-02 -2.8446129173348884e-02 -0.0000000000000000e+00 -3.3194207260061242e+07 -2.8261891016884526e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.9842318427908845e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.7957211533156259e-01 1.0000000000000000e+00 -2.9473684210526313e-01 8.6869806094182808e-02 -2.5603732322495985e-02 -0.0000000000000000e+00 -3.2884149286440846e+07 -6.5240578331303358e-04 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 4.1332976227096016e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 2.2566815140637572e-01 1.0000000000000000e+00 -2.8421052631578947e-01 8.0775623268698055e-02 -2.2957282402682605e-02 -0.0000000000000000e+00 -3.2564763542401820e+07 -1.3230474301884534e-04 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -8.9013361783352618e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0200288654363485e-01 1.0000000000000000e+00 -2.7368421052631581e-01 7.4903047091412753e-02 -2.0499781309228755e-02 -0.0000000000000000e+00 -3.2235910996290851e+07 
-2.2763919031965966e-05 0.0000000000000000e+00 0.0000000000000000e+00 1.5963017835031278e-04 -1.1834739943082154e-01 0.0000000000000000e+00 0.0000000000000000e+00 -5.7534210508715421e-04 -3.2496472178913285e-02 1.0000000000000000e+00 -2.6315789473684209e-01 6.9252077562326861e-02 -1.8224230937454435e-02 -0.0000000000000000e+00 -3.1897453852406133e+07 1.1766933934135151e-01 0.0000000000000000e+00 0.0000000000000000e+00 2.0909077918752295e-02 -8.0054891556056232e-02 0.0000000000000000e+00 0.0000000000000000e+00 -5.2504663312063840e-02 -6.4544370785068139e-02 1.0000000000000000e+00 -2.5263157894736843e-01 6.3822714681440448e-02 -1.6123633182679693e-02 -0.0000000000000000e+00 -3.1549256225830898e+07 3.3915151685855366e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.1591503626231370e-01 -3.7427612913223690e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.7866822805253882e-01 -4.1890231978558873e-02 1.0000000000000000e+00 -2.4210526315789474e-01 5.8614958448753467e-02 -1.4190989940224523e-02 -0.0000000000000000e+00 -3.1191183393587172e+07 1.5626897220672240e-01 0.0000000000000000e+00 0.0000000000000000e+00 2.7102589539944072e-01 -1.3441300945223146e-02 0.0000000000000000e+00 0.0000000000000000e+00 -2.1937554065255879e-01 -1.7899992088040828e-02 1.0000000000000000e+00 -2.3157894736842105e-01 5.3628808864265930e-02 -1.2419303105408952e-02 -0.0000000000000000e+00 -3.0823097233739205e+07 -4.5889761416029337e-02 0.0000000000000000e+00 0.0000000000000000e+00 3.9533355560952921e-01 -3.9337519630671123e-03 0.0000000000000000e+00 0.0000000000000000e+00 -9.1202571282810271e-02 -5.8445454285107907e-03 1.0000000000000000e+00 -2.2105263157894736e-01 4.8864265927977837e-02 -1.0801574573552995e-02 -0.0000000000000000e+00 -3.0444855215506077e+07 -8.5485466293479639e-02 0.0000000000000000e+00 0.0000000000000000e+00 4.1742655305133147e-01 -9.7667774440200706e-04 0.0000000000000000e+00 0.0000000000000000e+00 3.6644079357620526e-02 -1.5645812979903574e-03 1.0000000000000000e+00 -2.1052631578947367e-01 4.4321329639889190e-02 -9.3308062399766710e-03 -0.0000000000000000e+00 -3.0056331771375515e+07 -5.1613987307140528e-02 0.0000000000000000e+00 0.0000000000000000e+00 3.7677806853318929e-01 -2.1180309163878829e-04 0.0000000000000000e+00 0.0000000000000000e+00 6.5354184877135096e-02 -3.5839960107434895e-04 1.0000000000000000e+00 -2.0000000000000001e-01 4.0000000000000008e-02 -8.0000000000000019e-03 -0.0000000000000000e+00 -2.9657376031923760e+07 -2.0586709118519942e-02 0.0000000000000000e+00 0.0000000000000000e+00 3.3324698528225988e-01 -4.1107072568475722e-05 0.0000000000000000e+00 0.0000000000000000e+00 4.7509924533272796e-02 -7.2519177339011736e-05 1.0000000000000000e+00 -1.8947368421052632e-01 3.5900277008310250e-02 -6.8021577489429958e-03 -0.0000000000000000e+00 -2.9247849788508385e+07 -6.3149871015004810e-03 0.0000000000000000e+00 0.0000000000000000e+00 2.6902481763940428e-01 -7.2140811359126060e-06 0.0000000000000000e+00 0.0000000000000000e+00 1.0882962004244967e-01 -1.3143774611327210e-05 1.0000000000000000e+00 -1.7894736842105263e-01 3.2022160664819943e-02 -5.7302813821256742e-03 -0.0000000000000000e+00 -2.8827620597817719e+07 -1.5998995910758370e-03 0.0000000000000000e+00 0.0000000000000000e+00 1.3858651749072567e-01 -1.0892662263073478e-06 0.0000000000000000e+00 0.0000000000000000e+00 2.1519494147264256e-01 -2.0331088554603621e-06 1.0000000000000000e+00 -1.6842105263157894e-01 2.8365650969529085e-02 -4.7773727948680561e-03 -0.0000000000000000e+00 -2.8396549759912662e+07 -3.4949313513852877e-04 
0.0000000000000000e+00 0.0000000000000000e+00 -1.9968098640650647e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.9284674322244838e-01 0.0000000000000000e+00 1.0000000000000000e+00 -1.5789473684210525e-01 2.4930747922437671e-02 -3.9364338824901587e-03 -0.0000000000000000e+00 -2.7954504561053500e+07 -6.7774195098072875e-05 0.0000000000000000e+00 0.0000000000000000e+00 -1.1325358183759204e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.7893132438445457e-02 0.0000000000000000e+00 1.0000000000000000e+00 -1.4736842105263157e-01 2.1717451523545702e-02 -3.2004665403119982e-03 -0.0000000000000000e+00 -2.7501342752012245e+07 5.6890986261887112e-03 0.0000000000000000e+00 2.4632390461375082e-03 -1.0696208513639913e-01 0.0000000000000000e+00 0.0000000000000000e+00 -7.8171058742844777e-03 -5.6615568209656034e-02 0.0000000000000000e+00 1.0000000000000000e+00 -1.3684210526315790e-01 1.8725761772853188e-02 -2.5624726636535944e-03 -0.0000000000000000e+00 -2.7036898926030204e+07 2.4618481081097995e-01 0.0000000000000000e+00 4.8820311055440317e-02 -6.1272673063231448e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.0183399555242575e-01 -5.8086735980862475e-02 0.0000000000000000e+00 1.0000000000000000e+00 -1.2631578947368421e-01 1.5955678670360112e-02 -2.0154541478349616e-03 -0.0000000000000000e+00 -2.6561044027604885e+07 2.9953964554960644e-01 0.0000000000000000e+00 1.7507033112252593e-01 -2.5547441406802145e-02 0.0000000000000000e+00 0.0000000000000000e+00 -2.1612831441918828e-01 -3.0993666067196276e-02 0.0000000000000000e+00 1.0000000000000000e+00 -1.1578947368421053e-01 1.3407202216066482e-02 -1.5524128881761190e-03 -0.0000000000000000e+00 -2.6073662987834934e+07 5.7109131744450661e-02 0.0000000000000000e+00 3.2987495754170404e-01 -8.4051340693276771e-03 0.0000000000000000e+00 0.0000000000000000e+00 -1.8320362885086547e-01 -1.1758010932446473e-02 0.0000000000000000e+00 1.0000000000000000e+00 -1.0526315789473684e-01 1.1080332409972297e-02 -1.1663507799970839e-03 -0.0000000000000000e+00 -2.5574620168755420e+07 -7.8184230485700307e-02 0.0000000000000000e+00 4.1619910711870090e-01 -2.2938316718133949e-03 0.0000000000000000e+00 0.0000000000000000e+00 -2.7189893305956774e-02 -3.5232574314479738e-03 0.0000000000000000e+00 1.0000000000000000e+00 -9.4736842105263161e-02 8.9750692520775624e-03 -8.5026971861787448e-04 -0.0000000000000000e+00 -2.5063768014097415e+07 -7.4737010936004322e-02 0.0000000000000000e+00 4.0522771221040943e-01 -5.3798391262850328e-04 0.0000000000000000e+00 0.0000000000000000e+00 5.8945294122267095e-02 -8.8267150904121708e-04 0.0000000000000000e+00 1.0000000000000000e+00 -8.4210526315789472e-02 7.0914127423822712e-03 -5.9717159935850702e-04 -0.0000000000000000e+00 -2.4540960104356453e+07 -3.7125986784173719e-02 0.0000000000000000e+00 3.5823603012410249e-01 -1.1129983958096788e-04 0.0000000000000000e+00 0.0000000000000000e+00 5.6583906662152982e-02 -1.9175152461996296e-04 0.0000000000000000e+00 1.0000000000000000e+00 -7.3684210526315783e-02 5.4293628808864255e-03 -4.0005831753899977e-04 -0.0000000000000000e+00 -2.4006064559022680e+07 -1.3178061904414793e-02 0.0000000000000000e+00 3.1330364005523526e-01 -2.0616437121920034e-05 0.0000000000000000e+00 0.0000000000000000e+00 5.6795794533862881e-02 -3.6864262677767610e-05 0.0000000000000000e+00 1.0000000000000000e+00 -6.3157894736842107e-02 3.9889196675900280e-03 -2.5193176847937020e-04 -0.0000000000000000e+00 -2.3458940367557507e+07 -3.7205065395061786e-03 0.0000000000000000e+00 
2.2498345090173064e-01 -3.5244398305267092e-06 0.0000000000000000e+00 0.0000000000000000e+00 1.5701149313902130e-01 -6.4936311517650639e-06 0.0000000000000000e+00 1.0000000000000000e+00 -5.2631578947368418e-02 2.7700831024930744e-03 -1.4579384749963548e-04 -0.0000000000000000e+00 -2.2899451906451862e+07 -8.8460183856306875e-04 0.0000000000000000e+00 7.3882604017788261e-02 -5.1822314315757625e-07 0.0000000000000000e+00 0.0000000000000000e+00 2.2974705727631178e-01 -9.7632309208494808e-07 0.0000000000000000e+00 1.0000000000000000e+00 -4.2105263157894736e-02 1.7728531855955678e-03 -7.4646449919813377e-05 -0.0000000000000000e+00 -2.2327463736216143e+07 -1.8370553324891862e-04 0.0000000000000000e+00 -7.0043906737830369e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.3510500812682708e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -3.1578947368421054e-02 9.9722991689750701e-04 -3.1491471059921275e-05 -0.0000000000000000e+00 -2.1742838455218170e+07 -3.4179300052400812e-05 0.0000000000000000e+00 -1.2021127225410981e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.3575925213518148e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -2.1052631578947368e-02 4.4321329639889195e-04 -9.3308062399766721e-06 -0.0000000000000000e+00 -2.1145430968515009e+07 6.1977227981245921e-02 0.0000000000000000e+00 -7.7681102540454725e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -9.7440172702976169e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -1.0526315789473684e-02 1.1080332409972299e-04 -1.1663507799970840e-06 -0.0000000000000000e+00 -2.0535093915450227e+07 3.2914194698808724e-01 0.0000000000000000e+00 4.5664291184986848e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.0184970630530680e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.9911701609588567e+07 2.0869631953634510e-01 0.0000000000000000e+00 2.2250201777292919e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -2.4825587622553755e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 1.0526315789473684e-02 1.1080332409972299e-04 1.1663507799970840e-06 1.1663507799970840e-06 -1.9275125611193918e+07 -1.8988164869436275e-02 0.0000000000000000e+00 3.7253865158380839e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.3227636261920103e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 2.1052631578947368e-02 4.4321329639889195e-04 9.3308062399766721e-06 9.3308062399766721e-06 -1.8625232264776390e+07 -8.7172921748424712e-02 0.0000000000000000e+00 4.1880193384853720e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.7975180667404809e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 3.1578947368421054e-02 9.9722991689750701e-04 3.1491471059921275e-05 3.1491471059921275e-05 -1.7961888448384833e+07 -5.9479346491040953e-02 0.0000000000000000e+00 3.8647078990954892e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.6336098965565221e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 4.2105263157894736e-02 1.7728531855955678e-03 7.4646449919813377e-05 7.4646449919813377e-05 -1.7284953087222628e+07 -2.5344894340844023e-02 0.0000000000000000e+00 3.4167515847138674e-01 0.0000000000000000e+00 
0.0000000000000000e+00 0.0000000000000000e+00 4.8489996379394593e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 5.2631578947368418e-02 2.7700831024930744e-03 1.4579384749963548e-04 1.4579384749963548e-04 -1.6594301290707462e+07 -8.1360247239318281e-03 0.0000000000000000e+00 2.8634422413720390e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 8.6939289263681607e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 6.3157894736842107e-02 3.9889196675900280e-03 2.5193176847937020e-04 2.5193176847937020e-04 -1.5889801689018261e+07 -2.1329811439999668e-03 0.0000000000000000e+00 1.6941872812306108e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.9932443727845253e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 7.3684210526315783e-02 5.4293628808864255e-03 4.0005831753899977e-04 4.0005831753899977e-04 -1.5171332894279920e+07 -4.7868580374672127e-04 0.0000000000000000e+00 9.7339652391691400e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 2.1317756978596630e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 8.4210526315789472e-02 7.0914127423822712e-03 5.9717159935850702e-04 5.9717159935850702e-04 -1.4438759648172289e+07 -9.4885576906897671e-05 0.0000000000000000e+00 -1.0342703349042752e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.8989839045089055e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 9.4736842105263161e-02 8.9750692520775624e-03 8.5026971861787448e-04 8.5026971861787448e-04 -1.3691952932217911e+07 4.6549987495046264e-04 0.0000000000000000e+00 -1.1370885399932430e-01 7.7849954623234125e-04 0.0000000000000000e+00 0.0000000000000000e+00 -4.6780737173119882e-02 -2.6406827272240662e-03 0.0000000000000000e+00 1.0000000000000000e+00 1.0526315789473684e-01 1.1080332409972297e-02 1.1663507799970839e-03 1.1663507799970839e-03 -1.2930769741141085e+07 1.8286923577271125e-01 0.0000000000000000e+00 -7.0505792246302060e-02 3.3178956057083007e-02 0.0000000000000000e+00 0.0000000000000000e+00 -6.2077599161047464e-02 -7.6128408846089957e-02 0.0000000000000000e+00 1.0000000000000000e+00 1.1578947368421053e-01 1.3407202216066482e-02 1.5524128881761190e-03 1.5524128881761190e-03 -1.2155081930344526e+07 3.2815365238396688e-01 0.0000000000000000e+00 -3.1075379328076348e-02 1.4440180014681392e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.6293517323374769e-02 -1.9992737502479768e-01 0.0000000000000000e+00 1.0000000000000000e+00 1.2631578947368421e-01 1.5955678670360112e-02 2.0154541478349616e-03 2.0154541478349616e-03 -1.1364771600352474e+07 1.0472734947167857e-01 0.0000000000000000e+00 -1.0670392954156069e-02 3.0145227582186795e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.4579521804191179e-02 -2.0462195254207211e-01 0.0000000000000000e+00 1.0000000000000000e+00 1.3684210526315790e-01 1.8725761772853188e-02 2.5624726636535944e-03 2.5624726636535944e-03 -1.0559703577307537e+07 -6.5414735178786182e-02 0.0000000000000000e+00 -3.0133373947628865e-03 4.0816930917890015e-01 0.0000000000000000e+00 0.0000000000000000e+00 -4.5547919780519388e-03 -5.7811109467925295e-02 0.0000000000000000e+00 1.0000000000000000e+00 1.4736842105263157e-01 2.1717451523545702e-02 3.2004665403119982e-03 3.2004665403119982e-03 -9.7397521797204204e+06 -8.1039296440989866e-02 0.0000000000000000e+00 -7.2674473172627254e-04 4.1235355967765930e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.1786336808960263e-03 
4.9499274042558168e-02 0.0000000000000000e+00 1.0000000000000000e+00 1.5789473684210525e-01 2.4930747922437671e-02 3.9364338824901587e-03 3.9364338824901587e-03 -8.9047886006812230e+06 -4.4087905632133433e-02 0.0000000000000000e+00 -1.5386856254847681e-04 3.6716665813617538e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.6278123722066446e-04 6.1468774353944371e-02 0.0000000000000000e+00 1.0000000000000000e+00 1.6842105263157894e-01 2.8365650969529085e-02 4.7773727948680561e-03 4.7773727948680561e-03 -8.0546743582526855e+06 -1.6550422413122053e-02 0.0000000000000000e+00 -2.9267163684834725e-05 3.2392301620679653e-01 0.0000000000000000e+00 0.0000000000000000e+00 -5.2002150652398645e-05 4.9847666173530675e-02 0.0000000000000000e+00 1.0000000000000000e+00 1.7894736842105263e-01 3.2022160664819943e-02 5.7302813821256742e-03 5.7302813821256742e-03 -7.1892840764447525e+06 -4.8645759144098875e-03 0.0000000000000000e+00 -5.0502559169199467e-06 2.4859549248739221e-01 0.0000000000000000e+00 0.0000000000000000e+00 -9.2540988607474148e-06 1.3286328271100897e-01 0.0000000000000000e+00 1.0000000000000000e+00 1.8947368421052632e-01 3.5900277008310250e-02 6.8021577489429958e-03 6.8021577489429958e-03 -6.3084860786178336e+06 -1.1930031026986410e-03 0.0000000000000000e+00 -7.5235207367785176e-07 1.0653571529948751e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.4109555146948659e-06 2.2584550280153473e-01 0.0000000000000000e+00 1.0000000000000000e+00 2.0000000000000001e-01 4.0000000000000008e-02 8.0000000000000019e-03 8.0000000000000019e-03 -5.4121575398616791e+06 -2.5396202412826985e-04 0.0000000000000000e+00 0.0000000000000000e+00 -4.6832193791313273e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.6614010275431002e-01 0.0000000000000000e+00 1.0000000000000000e+00 2.1052631578947367e-01 4.4321329639889190e-02 9.3308062399766710e-03 9.3308062399766710e-03 -4.5001672449266911e+06 -4.8220518125413127e-05 1.7972165294585549e-07 0.0000000000000000e+00 -1.1871343983470048e-01 0.0000000000000000e+00 -7.0891758549395515e-07 0.0000000000000000e+00 1.0091095488977575e-02 0.0000000000000000e+00 1.0000000000000000e+00 2.2105263157894736e-01 4.8864265927977837e-02 1.0801574573552995e-02 1.0801574573552995e-02 -3.5723705129204541e+06 2.4169016536933870e-02 5.9438222395961854e-03 0.0000000000000000e+00 -9.8735297547960704e-02 0.0000000000000000e+00 -1.7549188213126270e-02 0.0000000000000000e+00 -6.2455637372362927e-02 0.0000000000000000e+00 1.0000000000000000e+00 2.3157894736842105e-01 5.3628808864265930e-02 1.2419303105408952e-02 1.2419303105408952e-02 -2.6286213762276992e+06 2.9724155612227232e-01 6.7859854151592250e-02 0.0000000000000000e+00 -5.2588926725789020e-02 0.0000000000000000e+00 -1.2825364543031706e-01 0.0000000000000000e+00 -5.3106046223313087e-02 0.0000000000000000e+00 1.0000000000000000e+00 2.4210526315789474e-01 5.8614958448753467e-02 1.4190989940224523e-02 1.4190989940224523e-02 -1.6688142544999197e+06 2.5803024806313540e-01 2.0702131854989700e-01 0.0000000000000000e+00 -2.0806537629376717e-02 0.0000000000000000e+00 -2.2538113774670057e-01 0.0000000000000000e+00 -2.6119971738518863e-02 0.0000000000000000e+00 1.0000000000000000e+00 2.5263157894736843e-01 6.3822714681440448e-02 1.6123633182679693e-02 1.6123633182679693e-02 -6.9281623238137364e+05 1.5480751995147656e-02 3.5555300272627366e-01 0.0000000000000000e+00 -6.5717408111991144e-03 0.0000000000000000e+00 -1.5615934487315666e-01 0.0000000000000000e+00 -9.3946798718158985e-03 0.0000000000000000e+00 
1.0000000000000000e+00 2.6315789473684209e-01 6.9252077562326861e-02 1.8224230937454435e-02 1.8224230937454435e-02 2.9950556338893622e+05 -8.5092846828545440e-02 4.1993036162424330e-01 0.0000000000000000e+00 -1.7354377274918880e-03 0.0000000000000000e+00 -1.0730035819227872e-03 0.0000000000000000e+00 -2.7056969420893432e-03 0.0000000000000000e+00 1.0000000000000000e+00 2.7368421052631581e-01 7.4903047091412753e-02 2.0499781309228755e-02 2.0499781309228755e-02 1.3082802235329971e+06 -6.7344467282778389e-02 3.9648730685263067e-01 0.0000000000000000e+00 -3.9627411936631822e-04 0.0000000000000000e+00 6.4795239339487262e-02 0.0000000000000000e+00 -6.5730654337462586e-04 0.0000000000000000e+00 1.0000000000000000e+00 2.8421052631578947e-01 8.0775623268698055e-02 2.2957282402682605e-02 2.2957282402682605e-02 2.3336263527088910e+06 -3.0859121798647051e-02 3.4988988278635952e-01 0.0000000000000000e+00 -8.0173402304067263e-05 0.0000000000000000e+00 5.1950746816847776e-02 0.0000000000000000e+00 -1.3927740229629913e-04 0.0000000000000000e+00 1.0000000000000000e+00 2.9473684210526313e-01 8.6869806094182808e-02 2.5603732322495985e-02 2.5603732322495985e-02 3.3756763413345665e+06 -1.0398603799754021e-02 3.0094602992319508e-01 0.0000000000000000e+00 -1.4577289820257387e-05 0.0000000000000000e+00 6.9200680125590358e-02 0.0000000000000000e+00 -2.6236710696081891e-05 0.0000000000000000e+00 1.0000000000000000e+00 3.0526315789473685e-01 9.3185595567867041e-02 2.8446129173348884e-02 2.8446129173348884e-02 4.4345758976545855e+06 -2.8261891016884526e-03 1.9842544809308393e-01 0.0000000000000000e+00 -2.2638139954898488e-06 0.0000000000000000e+00 1.7957629818392778e-01 0.0000000000000000e+00 -4.1828523652043875e-06 0.0000000000000000e+00 1.0000000000000000e+00 3.1578947368421051e-01 9.9722991689750684e-02 3.1491471059921269e-02 3.1491471059921269e-02 5.5104500529150888e+06 -6.5240578331303358e-04 4.1333332231519290e-02 0.0000000000000000e+00 -3.5600442327298116e-07 0.0000000000000000e+00 2.2566882506564701e-01 0.0000000000000000e+00 -6.7365927127104966e-07 0.0000000000000000e+00 1.0000000000000000e+00 3.2631578947368423e-01 1.0648199445983381e-01 3.4746756086893135e-02 3.4746756086893135e-02 6.6034118503785804e+06 -1.3230474301884534e-04 -8.9013361783352618e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0200288654363485e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 3.3684210526315789e-01 1.1346260387811634e-01 3.8218982358944449e-02 3.8218982358944449e-02 7.7135919904695749e+06 -2.2763919031965966e-05 -1.1834739943082154e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.5963017835031278e-04 -3.2496472178913285e-02 0.0000000000000000e+00 0.0000000000000000e+00 -5.7534210508715421e-04 1.0000000000000000e+00 3.4736842105263160e-01 1.2066481994459835e-01 4.1915147980755220e-02 4.1915147980755220e-02 8.8411533235496357e+06 1.1766933934135151e-01 -8.0054891556056232e-02 0.0000000000000000e+00 0.0000000000000000e+00 2.0909077918752295e-02 -6.4544370785068139e-02 0.0000000000000000e+00 0.0000000000000000e+00 -5.2504663312063840e-02 1.0000000000000000e+00 3.5789473684210527e-01 1.2808864265927977e-01 4.5842251057005394e-02 4.5842251057005394e-02 9.9862234019981027e+06 3.3915151685855366e-01 -3.7427612913223690e-02 0.0000000000000000e+00 0.0000000000000000e+00 1.1591503626231370e-01 -4.1890231978558873e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.7866822805253882e-01 1.0000000000000000e+00 3.6842105263157893e-01 1.3573407202216065e-01 
5.0007289692374973e-02 5.0007289692374973e-02 1.1148932302210726e+07 1.5626897220672240e-01 -1.3441300945223146e-02 0.0000000000000000e+00 0.0000000000000000e+00 2.7102589539944072e-01 -1.7899992088040828e-02 0.0000000000000000e+00 0.0000000000000000e+00 -2.1937554065255879e-01 1.0000000000000000e+00 3.7894736842105264e-01 1.4360110803324100e-01 5.4417261991543966e-02 5.4417261991543966e-02 1.2329372176345021e+07 -4.5889761416029337e-02 -3.9337519630671123e-03 0.0000000000000000e+00 0.0000000000000000e+00 3.9533355560952921e-01 -5.8445454285107907e-03 0.0000000000000000e+00 0.0000000000000000e+00 -9.1202571282810271e-02 1.0000000000000000e+00 3.8947368421052631e-01 1.5168975069252078e-01 5.9079166059192299e-02 5.9079166059192299e-02 1.3527680857389219e+07 -8.5485466293479639e-02 -9.7667774440200706e-04 0.0000000000000000e+00 0.0000000000000000e+00 4.1742655305133147e-01 -1.5645812979903574e-03 0.0000000000000000e+00 0.0000000000000000e+00 3.6644079357620526e-02 1.0000000000000000e+00 4.0000000000000002e-01 1.6000000000000003e-01 6.4000000000000015e-02 6.4000000000000015e-02 1.4744013954299904e+07 -5.1613987307140528e-02 -2.1180309163878829e-04 0.0000000000000000e+00 0.0000000000000000e+00 3.7677806853318929e-01 -3.5839960107434895e-04 0.0000000000000000e+00 0.0000000000000000e+00 6.5354184877135096e-02 1.0000000000000000e+00 4.1052631578947368e-01 1.6853185595567868e-01 6.9186761918647033e-02 6.9186761918647033e-02 1.5978502905917309e+07 -2.0586709118519942e-02 -4.1107072568475722e-05 0.0000000000000000e+00 0.0000000000000000e+00 3.3324698528225988e-01 -7.2519177339011736e-05 0.0000000000000000e+00 0.0000000000000000e+00 4.7509924533272796e-02 1.0000000000000000e+00 4.2105263157894735e-01 1.7728531855955676e-01 7.4646449919813368e-02 7.4646449919813368e-02 1.7231268692723721e+07 -6.3149871015004810e-03 -7.2140811359126060e-06 0.0000000000000000e+00 0.0000000000000000e+00 2.6902481763940428e-01 -1.3143774611327210e-05 0.0000000000000000e+00 0.0000000000000000e+00 1.0882962004244967e-01 1.0000000000000000e+00 4.3157894736842106e-01 1.8626038781163437e-01 8.0386062108179043e-02 8.0386062108179043e-02 1.8502441721045829e+07 -1.5998995910758370e-03 -1.0892662263073478e-06 0.0000000000000000e+00 0.0000000000000000e+00 1.3858651749072567e-01 -2.0331088554603621e-06 0.0000000000000000e+00 0.0000000000000000e+00 2.1519494147264256e-01 1.0000000000000000e+00 4.4210526315789472e-01 1.9545706371191135e-01 8.6412596588423957e-02 8.6412596588423957e-02 1.9792148664706565e+07 -3.4949313513852877e-04 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.9968098640650647e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.9284674322244838e-01 1.0000000000000000e+00 4.5263157894736844e-01 2.0487534626038784e-01 9.2733051465228172e-02 9.2733051465228172e-02 2.1100520328300230e+07 -6.7774195098072875e-05 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.1325358183759204e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 3.7893132438445457e-02 1.0000000000000000e+00 4.6315789473684210e-01 2.1451523545706372e-01 9.9354424843271616e-02 9.9354424843271616e-02 2.2427701408503219e+07 5.6890986261887112e-03 2.4632390461375082e-03 0.0000000000000000e+00 0.0000000000000000e+00 -1.0696208513639913e-01 -7.8171058742844777e-03 0.0000000000000000e+00 0.0000000000000000e+00 -5.6615568209656034e-02 1.0000000000000000e+00 4.7368421052631576e-01 2.2437673130193903e-01 1.0628371482723427e-01 1.0628371482723427e-01 2.3773807935681403e+07 
2.4618481081097995e-01 4.8820311055440317e-02 0.0000000000000000e+00 0.0000000000000000e+00 -6.1272673063231448e-02 -1.0183399555242575e-01 0.0000000000000000e+00 0.0000000000000000e+00 -5.8086735980862475e-02 1.0000000000000000e+00 4.8421052631578948e-01 2.3445983379501387e-01 1.1352791952179618e-01 1.1352791952179618e-01 2.5138965494905464e+07 2.9953964554960644e-01 1.7507033112252593e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.5547441406802145e-02 -2.1612831441918828e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.0993666067196276e-02 1.0000000000000000e+00 4.9473684210526314e-01 2.4476454293628808e-01 1.2109403703163725e-01 1.2109403703163725e-01 2.6523310263039686e+07 5.7109131744450661e-02 3.2987495754170404e-01 0.0000000000000000e+00 0.0000000000000000e+00 -8.4051340693276771e-03 -1.8320362885086547e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.1758010932446473e-02 1.0000000000000000e+00 5.0526315789473686e-01 2.5529085872576179e-01 1.2898906546143754e-01 1.2898906546143754e-01 2.7926978180642635e+07 -7.8184230485700307e-02 4.1619910711870090e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.2938316718133949e-03 -2.7189893305956774e-02 0.0000000000000000e+00 0.0000000000000000e+00 -3.5232574314479738e-03 1.0000000000000000e+00 5.1578947368421058e-01 2.6603878116343493e-01 1.3722000291587699e-01 1.3722000291587699e-01 2.9350100892358869e+07 -7.4737010936004322e-02 4.0522771221040943e-01 0.0000000000000000e+00 0.0000000000000000e+00 -5.3798391262850328e-04 5.8945294122267095e-02 0.0000000000000000e+00 0.0000000000000000e+00 -8.8267150904121708e-04 1.0000000000000000e+00 5.2631578947368418e-01 2.7700831024930744e-01 1.4579384749963548e-01 1.4579384749963548e-01 3.0792791682040647e+07 -3.7125986784173719e-02 3.5823603012410249e-01 0.0000000000000000e+00 0.0000000000000000e+00 -1.1129983958096788e-04 5.6583906662152982e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.9175152461996296e-04 1.0000000000000000e+00 5.3684210526315790e-01 2.8819944598337949e-01 1.5471759731739321e-01 1.5471759731739321e-01 3.2255188890072003e+07 -1.3178061904414793e-02 3.1330364005523526e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.0616437121920034e-05 5.6795794533862881e-02 0.0000000000000000e+00 0.0000000000000000e+00 -3.6864262677767610e-05 1.0000000000000000e+00 5.4736842105263162e-01 2.9961218836565101e-01 1.6399825047383004e-01 1.6399825047383004e-01 3.3737424698033422e+07 -3.7205065395061786e-03 2.2498345090173064e-01 0.0000000000000000e+00 0.0000000000000000e+00 -3.5244398305267092e-06 1.5701149313902130e-01 0.0000000000000000e+00 0.0000000000000000e+00 -6.4936311517650639e-06 1.0000000000000000e+00 5.5789473684210522e-01 3.1124653739612185e-01 1.7364280507362584e-01 1.7364280507362584e-01 3.5239632326350734e+07 -8.8460183856306875e-04 7.3882604017788261e-02 0.0000000000000000e+00 0.0000000000000000e+00 -5.1822314315757625e-07 2.2974705727631178e-01 0.0000000000000000e+00 0.0000000000000000e+00 -9.7632309208494808e-07 1.0000000000000000e+00 5.6842105263157894e-01 3.2310249307479222e-01 1.8365825922146084e-01 1.8365825922146084e-01 3.6761934819881216e+07 -1.8370553324891862e-04 -7.0043906737830369e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.3510500812682708e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 5.7894736842105265e-01 3.3518005540166207e-01 1.9405161102201490e-01 1.9405161102201490e-01 3.8304448683193475e+07 -3.4179300052400812e-05 -1.2022600095442171e-01 0.0000000000000000e+00 
1.4728700311903286e-05 0.0000000000000000e+00 -1.3519940771543261e-02 0.0000000000000000e+00 -5.5984441974886700e-05 0.0000000000000000e+00 1.0000000000000000e+00 5.8947368421052626e-01 3.4747922437673123e-01 2.0482985857996788e-01 2.0482985857996788e-01 3.9867328474246711e+07 6.1977227981245921e-02 -8.9600939014954029e-02 0.0000000000000000e+00 1.1919836474499302e-02 0.0000000000000000e+00 -6.4884679517546728e-02 0.0000000000000000e+00 -3.2555493185429440e-02 0.0000000000000000e+00 1.0000000000000000e+00 5.9999999999999998e-01 3.5999999999999999e-01 2.1599999999999997e-01 2.1599999999999997e-01 4.1450727994291037e+07 3.2914194698808724e-01 -4.4610650244984916e-02 0.0000000000000000e+00 9.0274941429971764e-02 0.0000000000000000e+00 -4.7586436324094594e-02 0.0000000000000000e+00 -1.5426326998121220e-01 0.0000000000000000e+00 1.0000000000000000e+00 6.1052631578947369e-01 3.7274238227146816e-01 2.2756903338679108e-01 2.2756903338679108e-01 4.3054756450479880e+07 2.0869631953634510e-01 -1.6794634932976529e-02 0.0000000000000000e+00 2.3929665270590575e-01 0.0000000000000000e+00 -2.1745661626648261e-02 0.0000000000000000e+00 -2.2651021459888929e-01 0.0000000000000000e+00 1.0000000000000000e+00 6.2105263157894741e-01 3.8570637119113577e-01 2.3954395684502119e-01 2.3954395684502119e-01 4.4679530292131960e+07 -1.8988164869436275e-02 -5.1018176378398936e-03 0.0000000000000000e+00 3.7764046922164829e-01 0.0000000000000000e+00 -7.4407761527462875e-03 0.0000000000000000e+00 -1.2483558646645473e-01 0.0000000000000000e+00 1.0000000000000000e+00 6.3157894736842102e-01 3.9889196675900274e-01 2.5193176847937016e-01 2.5193176847937016e-01 4.6325184495605588e+07 -8.7172921748424712e-02 -1.3055419048209041e-03 0.0000000000000000e+00 4.2010747575335811e-01 0.0000000000000000e+00 -2.0641445077479018e-03 0.0000000000000000e+00 2.0039325175152698e-02 0.0000000000000000e+00 1.0000000000000000e+00 6.4210526315789473e-01 4.1229916897506924e-01 2.6473946639451817e-01 2.6473946639451817e-01 4.7991846162983879e+07 -5.9479346491040953e-02 -2.9049537695812611e-04 0.0000000000000000e+00 3.8676128528650705e-01 0.0000000000000000e+00 -4.8684122664065948e-04 0.0000000000000000e+00 6.6822940192205887e-02 0.0000000000000000e+00 1.0000000000000000e+00 6.5263157894736845e-01 4.2592797783933523e-01 2.7797404869514508e-01 2.7797404869514508e-01 4.9679642121459484e+07 -2.5344894340844023e-02 -5.7519918573519355e-05 0.0000000000000000e+00 3.4173267838996024e-01 0.0000000000000000e+00 -1.0071519927700912e-04 0.0000000000000000e+00 4.8590711578671605e-02 0.0000000000000000e+00 1.0000000000000000e+00 6.6315789473684206e-01 4.3977839335180047e-01 2.9164251348593084e-01 2.9164251348593084e-01 5.1388713588894203e+07 -8.1360247239318281e-03 -1.0271890012879138e-05 0.0000000000000000e+00 2.8635449602721674e-01 0.0000000000000000e+00 -1.8603553893817027e-05 0.0000000000000000e+00 8.6957892817575433e-02 0.0000000000000000e+00 1.0000000000000000e+00 6.7368421052631577e-01 4.5385041551246535e-01 3.0575185887155559e-01 3.0575185887155559e-01 5.3119190209192812e+07 -2.1329811439999668e-03 -1.5726052562367482e-06 0.0000000000000000e+00 1.6942030072831732e-01 0.0000000000000000e+00 -2.9207518307516431e-06 0.0000000000000000e+00 1.9932735803028329e-01 0.0000000000000000e+00 1.0000000000000000e+00 6.8421052631578949e-01 4.6814404432132967e-01 3.2030908295669924e-01 3.2030908295669924e-01 5.4871193086425975e+07 -4.7868580374672127e-04 -2.4393255686410929e-07 0.0000000000000000e+00 9.7342091717260160e-03 0.0000000000000000e+00 -4.6354368728304449e-07 
0.0000000000000000e+00 2.1317803332965357e-01 0.0000000000000000e+00 1.0000000000000000e+00 6.9473684210526321e-01 4.8265927977839340e-01 3.3532118384604176e-01 3.3532118384604176e-01 5.6644850124556869e+07 -9.4885576906897671e-05 0.0000000000000000e+00 0.0000000000000000e+00 -1.0342703349042752e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 6.8989839045089055e-02 0.0000000000000000e+00 1.0000000000000000e+00 7.0526315789473681e-01 4.9739612188365645e-01 3.5079515964426300e-01 3.5079515964426300e-01 5.8440306078570858e+07 4.6549987495046264e-04 0.0000000000000000e+00 7.7849954623234125e-04 -1.1370885399932430e-01 0.0000000000000000e+00 0.0000000000000000e+00 -2.6406827272240662e-03 -4.6780737173119882e-02 0.0000000000000000e+00 1.0000000000000000e+00 7.1578947368421053e-01 5.1235457063711909e-01 3.6673800845604315e-01 3.6673800845604315e-01 6.0257698738376126e+07 1.8286923577271125e-01 0.0000000000000000e+00 3.3178956057083007e-02 -7.0505792246302060e-02 0.0000000000000000e+00 0.0000000000000000e+00 -7.6128408846089957e-02 -6.2077599161047464e-02 0.0000000000000000e+00 1.0000000000000000e+00 7.2631578947368425e-01 5.2753462603878121e-01 3.8315672838606218e-01 3.8315672838606218e-01 6.2097159447418898e+07 3.2815365238396688e-01 0.0000000000000000e+00 1.4440180014681392e-01 -3.1075379328076348e-02 0.0000000000000000e+00 0.0000000000000000e+00 -1.9992737502479768e-01 -3.6293517323374769e-02 0.0000000000000000e+00 1.0000000000000000e+00 7.3684210526315785e-01 5.4293628808864258e-01 4.0005831753899979e-01 4.0005831753899979e-01 6.3958803792432591e+07 1.0472734947167857e-01 0.0000000000000000e+00 3.0145227582186795e-01 -1.0670392954156069e-02 0.0000000000000000e+00 0.0000000000000000e+00 -2.0462195254207211e-01 -1.4579521804191179e-02 0.0000000000000000e+00 1.0000000000000000e+00 7.4736842105263157e-01 5.5855955678670355e-01 4.1744977401953637e-01 4.1744977401953637e-01 6.5842753707203761e+07 -6.5414735178786182e-02 0.0000000000000000e+00 4.0816930917890015e-01 -3.0133373947628865e-03 0.0000000000000000e+00 0.0000000000000000e+00 -5.7811109467925295e-02 -4.5547919780519388e-03 0.0000000000000000e+00 1.0000000000000000e+00 7.5789473684210529e-01 5.7440443213296399e-01 4.3533809593235173e-01 4.3533809593235173e-01 6.7749140587336496e+07 -8.1039296440989866e-02 0.0000000000000000e+00 4.1235355967765930e-01 -7.2674473172627254e-04 0.0000000000000000e+00 0.0000000000000000e+00 4.9499274042558168e-02 -1.1786336808960263e-03 0.0000000000000000e+00 1.0000000000000000e+00 7.6842105263157889e-01 5.9047091412742370e-01 4.5373028138212557e-01 4.5373028138212557e-01 6.9678108008850276e+07 -4.4087905632133433e-02 0.0000000000000000e+00 3.6716665813617538e-01 -1.5386856254847681e-04 0.0000000000000000e+00 0.0000000000000000e+00 6.1468774353944371e-02 -2.6278123722066446e-04 0.0000000000000000e+00 1.0000000000000000e+00 7.7894736842105261e-01 6.0675900277008310e-01 4.7263332847353839e-01 4.7263332847353839e-01 7.1629787035486892e+07 -1.6550422413122053e-02 0.0000000000000000e+00 3.2392301620679653e-01 -2.9267163684834725e-05 0.0000000000000000e+00 0.0000000000000000e+00 4.9847666173530675e-02 -5.2002150652398645e-05 0.0000000000000000e+00 1.0000000000000000e+00 7.8947368421052633e-01 6.2326869806094187e-01 4.9205423531126991e-01 4.9205423531126991e-01 7.3604299627690956e+07 -4.8645759144098875e-03 0.0000000000000000e+00 2.4859549248739221e-01 -5.0502559169199467e-06 0.0000000000000000e+00 0.0000000000000000e+00 1.3286328271100897e-01 -9.2540988607474148e-06 0.0000000000000000e+00 
1.0000000000000000e+00 8.0000000000000004e-01 6.4000000000000012e-01 5.1200000000000012e-01 5.1200000000000012e-01 7.5601781557119071e+07 -1.1930031026986410e-03 0.0000000000000000e+00 1.0653571529948751e-01 -7.5235207367785176e-07 0.0000000000000000e+00 0.0000000000000000e+00 2.2584550280153473e-01 -1.4109555146948659e-06 0.0000000000000000e+00 1.0000000000000000e+00 8.1052631578947365e-01 6.5695290858725752e-01 5.3247762064440873e-01 5.3247762064440873e-01 7.7622359293640539e+07 -2.5396202412826985e-04 0.0000000000000000e+00 -4.6832193791313273e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.6614010275431002e-01 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 8.2105263157894737e-01 6.7412742382271473e-01 5.5349409534917626e-01 5.5349409534917626e-01 7.9666160208791256e+07 -4.8220518125413127e-05 0.0000000000000000e+00 -1.1871343983470048e-01 0.0000000000000000e+00 1.7972165294585549e-07 0.0000000000000000e+00 1.0091095488977575e-02 0.0000000000000000e+00 -7.0891758549395515e-07 1.0000000000000000e+00 8.3157894736842108e-01 6.9152354570637120e-01 5.7505642221898245e-01 5.7505642221898245e-01 8.1733321542241797e+07 2.4169016536933870e-02 0.0000000000000000e+00 -9.8735297547960704e-02 0.0000000000000000e+00 5.9438222395961854e-03 0.0000000000000000e+00 -6.2455637372362927e-02 0.0000000000000000e+00 -1.7549188213126270e-02 1.0000000000000000e+00 8.4210526315789469e-01 7.0914127423822704e-01 5.9717159935850694e-01 5.9717159935850694e-01 8.3823983214631885e+07 2.9724155612227232e-01 0.0000000000000000e+00 -5.2588926725789020e-02 0.0000000000000000e+00 6.7859854151592250e-02 0.0000000000000000e+00 -5.3106046223313087e-02 0.0000000000000000e+00 -1.2825364543031706e-01 1.0000000000000000e+00 8.5263157894736841e-01 7.2698060941828258e-01 6.1984662487243036e-01 6.1984662487243036e-01 8.5938269240893766e+07 2.5803024806313540e-01 0.0000000000000000e+00 -2.0806537629376717e-02 0.0000000000000000e+00 2.0702131854989700e-01 0.0000000000000000e+00 -2.6119971738518863e-02 0.0000000000000000e+00 -2.2538113774670057e-01 1.0000000000000000e+00 8.6315789473684212e-01 7.4504155124653748e-01 6.4308849686543235e-01 6.4308849686543235e-01 8.8076292616174176e+07 1.5480751995147656e-02 0.0000000000000000e+00 -6.5717408111991144e-03 0.0000000000000000e+00 3.5555300272627366e-01 0.0000000000000000e+00 -9.3946798718158985e-03 0.0000000000000000e+00 -1.5615934487315666e-01 1.0000000000000000e+00 8.7368421052631584e-01 7.6332409972299176e-01 6.6690421344219286e-01 6.6690421344219286e-01 9.0238174707291275e+07 -8.5092846828545440e-02 0.0000000000000000e+00 -1.7354377274918880e-03 0.0000000000000000e+00 4.1993036162424330e-01 0.0000000000000000e+00 -2.7056969420893432e-03 0.0000000000000000e+00 -1.0730035819227872e-03 1.0000000000000000e+00 8.8421052631578945e-01 7.8182825484764540e-01 6.9130077270739165e-01 6.9130077270739165e-01 9.2424069093082637e+07 -6.7344467282778389e-02 0.0000000000000000e+00 -3.9627411936631822e-04 0.0000000000000000e+00 3.9648730685263067e-01 0.0000000000000000e+00 -6.5730654337462586e-04 0.0000000000000000e+00 6.4795239339487262e-02 1.0000000000000000e+00 8.9473684210526316e-01 8.0055401662049863e-01 7.1628517276570935e-01 7.1628517276570935e-01 9.4634114440171003e+07 -3.0859121798647051e-02 0.0000000000000000e+00 -8.0173402304067263e-05 0.0000000000000000e+00 3.4988988278635952e-01 0.0000000000000000e+00 -1.3927740229629913e-04 0.0000000000000000e+00 5.1950746816847776e-02 1.0000000000000000e+00 9.0526315789473688e-01 8.1950138504155134e-01 
7.4186441172182538e-01 7.4186441172182538e-01 9.6868439125350088e+07 -1.0398603799754021e-02 0.0000000000000000e+00 -1.4577289820257387e-05 0.0000000000000000e+00 3.0094602992319508e-01 0.0000000000000000e+00 -2.6236710696081891e-05 0.0000000000000000e+00 6.9200680125590358e-02 1.0000000000000000e+00 9.1578947368421049e-01 8.3867036011080320e-01 7.6804548768041980e-01 7.6804548768041980e-01 9.9127162507476807e+07 -2.8261891016884526e-03 0.0000000000000000e+00 -2.2638139954898488e-06 0.0000000000000000e+00 1.9842544809308393e-01 0.0000000000000000e+00 -4.1828523652043875e-06 0.0000000000000000e+00 1.7957629818392778e-01 1.0000000000000000e+00 9.2631578947368420e-01 8.5806094182825488e-01 7.9483539874617293e-01 7.9483539874617293e-01 1.0141041149911594e+08 -6.5240578331303358e-04 0.0000000000000000e+00 -3.5600442327298116e-07 0.0000000000000000e+00 4.1333332231519290e-02 0.0000000000000000e+00 -6.7365927127104966e-07 0.0000000000000000e+00 2.2566882506564701e-01 1.0000000000000000e+00 9.3684210526315792e-01 8.7767313019390591e-01 8.2224114302376450e-01 8.2224114302376450e-01 1.0371832633873191e+08 -1.3230474301884534e-04 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -8.9013361783352618e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0200288654363485e-01 1.0000000000000000e+00 9.4736842105263153e-01 8.9750692520775610e-01 8.5026971861787415e-01 8.5026971861787415e-01 1.0605103777870619e+08 -2.4138787293590782e-05 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.1834739943082154e-01 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -3.2496472178913285e-02 1.0000000000000000e+00 9.5789473684210524e-01 9.1756232686980610e-01 8.7892812363318262e-01 8.7892812363318262e-01 1.0840866954560232e+08 -4.0326882503041127e-06 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -8.0054891556056232e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -6.4544370785068139e-02 1.0000000000000000e+00 9.6842105263157896e-01 9.3783933518005547e-01 9.0822335617436944e-01 9.0822335617436944e-01 1.1079135118806735e+08 -6.2527588338813119e-07 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -3.7427612913223690e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -4.1890231978558873e-02 1.0000000000000000e+00 9.7894736842105268e-01 9.5833795013850420e-01 9.3816241434611469e-01 9.3816241434611469e-01 1.1319921057647294e+08 -9.0937220696952162e-08 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.3441300945223146e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.7899992088040828e-02 1.0000000000000000e+00 9.8947368421052628e-01 9.7905817174515231e-01 9.6875229625309800e-01 9.6875229625309800e-01 1.1563238092136064e+08 -1.2510669177246911e-08 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -3.9337519630671123e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -5.8445454285107907e-03 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.1809099716453132e+08 """, 'event':"""0.0000000000000000e+00 1.7972165294585549e-07 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -7.0891758549395515e-07 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -1.0000000000000000e+00 1.0000000000000000e+00 -1.0000000000000000e+00 -0.0000000000000000e+00 
-3.9964378119605467e+07 2.4177346180074877e-02 5.9438222395961854e-03 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.7549188213126270e-02 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 1.0000000000000000e+00 -9.8947368421052628e-01 9.7905817174515231e-01 -9.6875229625309800e-01 -0.0000000000000000e+00 -3.9964295632754065e+07 2.9724288489710671e-01 6.6691877390710710e-02 0.0000000000000000e+00 1.1679767608815418e-03 0.0000000000000000e+00 -1.2437298949140786e-01 0.0000000000000000e+00 -3.8806559389091872e-03 0.0000000000000000e+00 1.0000000000000000e+00 -9.7894736842105268e-01 9.5833795013850420e-01 -9.3816241434611469e-01 -0.0000000000000000e+00 -3.9964008717195466e+07 2.5803044610535053e-01 1.6919887664583222e-01 0.0000000000000000e+00 3.7738862474685671e-02 8.3579429379114777e-05 -1.4115599824277203e-01 0.0000000000000000e+00 -8.3918434149865523e-02 -3.0670535406300835e-04 1.0000000000000000e+00 -9.6842105263157896e-01 9.3783933518005547e-01 -9.0822335617436944e-01 -0.0000000000000000e+00 -3.9963382251577556e+07 1.5480779827902719e-02 2.0115527889004484e-01 1.7972165294585549e-07 1.3671413001462854e-01 1.7683414099947323e-02 5.0427887160567904e-02 -7.0891758549395515e-07 -1.6094050000148924e-01 -4.5646023114649811e-02 1.0000000000000000e+00 -9.5789473684210524e-01 9.1756232686980610e-01 -8.7892812363318262e-01 -0.0000000000000000e+00 -3.9962264238541424e+07 -8.5092843112443153e-02 1.0826831140941834e-01 5.9438222395961854e-03 2.0428516922367393e-01 1.0143305875155482e-01 1.9784723228238235e-01 -1.7549188213126270e-02 -2.7801642355785985e-02 -1.5356940529539287e-01 1.0000000000000000e+00 -9.4736842105263153e-01 8.9750692520775610e-01 -8.5026971861787415e-01 -0.0000000000000000e+00 -3.9960523102045782e+07 -6.7344467282778389e-02 -1.5192826604774678e-02 6.6691877390710710e-02 1.5192563333963566e-01 1.9306262272705899e-01 1.1100114773388517e-01 -1.2437298949140786e-01 1.7226230721222197e-01 -9.4095226115212052e-02 1.0000000000000000e+00 -9.3684210526315792e-01 8.7767313019390591e-01 -8.2224114302376450e-01 -0.0000000000000000e+00 -3.9958030461220443e+07 -3.0859121798647051e-02 -5.9611929791310761e-02 1.6919887664583222e-01 5.7816980815322772e-02 1.8256953454589439e-01 -3.1066893522055071e-03 -1.4115599824277203e-01 7.1597287908986007e-02 1.2430944114877630e-01 1.0000000000000000e+00 -9.2631578947368420e-01 8.5806094182825488e-01 -7.9483539874617293e-01 -0.0000000000000000e+00 -3.9954647886381753e+07 -1.0398603799754021e-02 -2.7212216744210212e-02 2.0115527889004484e-01 8.2514000683269279e-02 6.2172560915691455e-02 -8.0658471772627749e-02 5.0427887160567904e-02 -1.3400726359739601e-01 1.8779179630281093e-01 1.0000000000000000e+00 -9.1578947368421049e-01 8.3867036011080320e-01 -7.6804548768041980e-01 -0.0000000000000000e+00 -3.9950228902413361e+07 -2.8261891016884526e-03 8.0209005576718964e-02 1.0826831140941834e-01 1.5740056826346380e-01 -4.0075556165366157e-02 -1.7757047085784922e-01 1.9784723228238235e-01 -7.7253408640873558e-02 6.5434351891749037e-02 1.0000000000000000e+00 -9.0526315789473688e-01 8.1950138504155134e-01 -7.4186441172182538e-01 -0.0000000000000000e+00 -3.9944630184490673e+07 -6.5240578331303358e-04 1.8671873839475647e-01 -1.5192826604774678e-02 1.8910188123201388e-01 -5.8371983911825132e-02 -1.0812787798155156e-01 1.1100114773388517e-01 2.2488780400294686e-02 -2.2042096632510385e-02 1.0000000000000000e+00 -8.9473684210526316e-01 8.0055401662049863e-01 -7.1628517276570935e-01 -0.0000000000000000e+00 -3.9937731831733234e+07 
-1.3230474301884534e-04 2.1815823789069552e-01 -5.9611929791310761e-02 1.7834064442664524e-01 -3.6309461213591215e-02 3.7153406706898165e-02 -3.1066893522055071e-03 2.8801918432358464e-04 -3.3403546593305647e-02 1.0000000000000000e+00 -8.8421052631578945e-01 7.8182825484764540e-01 -6.9130077270739165e-01 -0.0000000000000000e+00 -3.9929402377448291e+07 -2.4138787293590782e-05 1.9836539362542779e-01 -2.7212216744210212e-02 1.4361396913677585e-01 -1.5388981806849873e-02 2.6005752171882255e-02 -8.0658471772627749e-02 7.2486302997612467e-02 -1.8697604146125976e-02 1.0000000000000000e+00 -8.7368421052631584e-01 7.6332409972299176e-01 -6.6690421344219286e-01 -0.0000000000000000e+00 -3.9919467169778965e+07 -4.0326882503041127e-06 1.7004278105755538e-01 8.0209005576718964e-02 5.4586736964392375e-02 -5.0386096958458283e-03 1.9894509103233610e-02 -1.7757047085784922e-01 1.6459230660036478e-01 -7.0993673209910615e-03 1.0000000000000000e+00 -8.6315789473684212e-01 7.4504155124653748e-01 -6.4308849686543235e-01 -0.0000000000000000e+00 -3.9907786847505786e+07 -6.2527588338813119e-07 1.5905651853639177e-01 1.8671873839475647e-01 -4.3729264683742025e-02 -1.3582822198243730e-03 2.9690484600776099e-02 -1.0812787798155156e-01 8.1402624180322181e-02 -2.1023200711287751e-03 1.0000000000000000e+00 -8.5263157894736841e-01 7.2698060941828258e-01 -6.1984662487243036e-01 -0.0000000000000000e+00 -3.9894241441418923e+07 -9.0937220696952162e-08 1.5296388884063916e-01 2.1815823789069552e-01 -7.0669011124139827e-02 -2.3041555027059071e-04 -1.9050509451844708e-02 3.7153406706898165e-02 -1.6936932112187754e-02 -8.2546236142740852e-04 1.0000000000000000e+00 -8.4210526315789469e-01 7.0914127423822704e-01 -5.9717159935850694e-01 -0.0000000000000000e+00 -3.9878693250274450e+07 -1.2510669177246911e-08 1.3156558369691071e-01 1.9836521390377485e-01 -4.8243261570625216e-02 1.7619353944818051e-02 5.8662420655810149e-02 2.6006461089467747e-02 -3.9900023154498407e-02 -4.5757060072061179e-02 1.0000000000000000e+00 -8.3157894736842108e-01 6.9152354570637120e-01 -5.7505642221898245e-01 -0.0000000000000000e+00 -3.9860998616045773e+07 -1.6393707816718330e-09 5.6345100753362388e-02 1.6409895881795919e-01 -2.2078293686281818e-02 1.0142127575894735e-01 1.4129609867630374e-01 3.7443697316359881e-02 -2.5355446281368068e-02 -1.5359059365486832e-01 1.0000000000000000e+00 -8.2105263157894737e-01 6.7412742382271473e-01 -5.5349409534917626e-01 -0.0000000000000000e+00 -3.9841029804515697e+07 0.0000000000000000e+00 2.1793115828660697e-02 9.2364641145681062e-02 -6.5326696738188961e-03 1.9306063649818450e-01 -4.4755355088080082e-02 1.5406347409218396e-01 -1.4349982160473887e-02 -9.4098902995715231e-02 1.0000000000000000e+00 -8.1052631578947365e-01 6.5695290858725752e-01 -5.3247762064440873e-01 -0.0000000000000000e+00 -3.9818654567553706e+07 0.0000000000000000e+00 9.8253021885668562e-02 -1.6151408375813942e-02 3.5551552001175248e-02 1.8256922374919074e-01 -1.5854644388003486e-01 1.2179878343686432e-01 -8.7221277924167465e-02 1.2430885212834518e-01 1.0000000000000000e+00 -8.0000000000000004e-01 6.4000000000000012e-01 -5.1200000000000012e-01 -0.0000000000000000e+00 -3.9793725269769594e+07 0.0000000000000000e+00 1.5285476691215422e-01 -5.1906460814839753e-02 1.3618602296187249e-01 6.2172560915691455e-02 1.0428163142958963e-02 -3.7410780701822063e-02 -1.6179738022522952e-01 1.8779179630281093e-01 1.0000000000000000e+00 -7.8947368421052633e-01 6.2326869806094187e-01 -4.9205423531126991e-01 -0.0000000000000000e+00 -3.9766099107882999e+07 
0.0000000000000000e+00 9.2123146236351552e-02 4.3566025855902690e-02 2.0417342571709904e-01 -4.0075556165366157e-02 1.5492344253726803e-01 -1.9257135068834524e-01 -2.7992687684244611e-02 6.5434351891749037e-02 1.0000000000000000e+00 -7.7894736842105261e-01 6.0675900277008310e-01 -4.7263332847353839e-01 -0.0000000000000000e+00 -3.9735647326953202e+07 0.0000000000000000e+00 4.3796418122361118e-02 1.6335668776978365e-01 1.5190460181838772e-01 -5.8371983911825132e-02 -2.3844844859590569e-02 -1.2547873944576943e-01 1.7222491478578517e-01 -2.2042096632510385e-02 1.0000000000000000e+00 -7.6842105263157889e-01 5.9047091412742370e-01 -4.5373028138212557e-01 -0.0000000000000000e+00 -3.9702236464460857e+07 0.0000000000000000e+00 1.0731574615492825e-01 1.7131918900642060e-01 5.7813335883083829e-02 -3.6225881784212099e-02 -1.4725941503564760e-01 1.0971897950965598e-01 7.1590601234367446e-02 -3.3710251947368652e-02 1.0000000000000000e+00 -7.5789473684210529e-01 5.7440443213296399e-01 -4.3533809593235173e-01 -0.0000000000000000e+00 -3.9665728276365325e+07 0.0000000000000000e+00 1.5573172071478419e-01 5.8767500060305407e-02 8.2513236906691323e-02 2.2944322930974499e-03 1.4557849361264199e-02 1.8280522986076542e-01 -1.3400765344253551e-01 -6.4343627260775790e-02 1.0000000000000000e+00 -7.4736842105263157e-01 5.5855955678670355e-01 -4.1744977401953637e-01 -0.0000000000000000e+00 -3.9625947171426475e+07 0.0000000000000000e+00 9.2876336967603779e-02 -4.0940490403193273e-02 1.5145674602386761e-01 9.6394449055709000e-02 1.5610593317834112e-01 6.4060815922217310e-02 -5.9704220427747284e-02 -1.6066877261638393e-01 1.0000000000000000e+00 -7.3684210526315785e-01 5.4293628808864258e-01 -4.0005831753899979e-01 -0.0000000000000000e+00 -3.9582766803961083e+07 0.0000000000000000e+00 4.3966158171504059e-02 -5.8562755482215997e-02 1.2357798060218469e-01 1.9170434050723462e-01 -2.3561230111389816e-02 -2.2363103807147941e-02 1.4298111395279336e-01 -9.6197546186340827e-02 1.0000000000000000e+00 -7.2631578947368425e-01 5.2753462603878121e-01 -3.8315672838606218e-01 -0.0000000000000000e+00 -3.9536072560850807e+07 0.0000000000000000e+00 1.0743314279239796e-01 -3.6346923353920750e-02 4.6880630255498698e-02 1.8225553956624468e-01 -1.4750697456160872e-01 -3.3469379096026097e-02 5.7525583277230091e-02 1.2379068414141191e-01 1.0000000000000000e+00 -7.1578947368421053e-01 5.1235457063711909e-01 -3.6673800845604315e-01 -0.0000000000000000e+00 -3.9485728182394587e+07 0.0000000000000000e+00 1.7342100106436448e-01 -1.5395432111407833e-02 7.9172820261359550e-02 6.2108500760562180e-02 -3.1076518586638784e-02 -1.8710358075597799e-02 -1.3888208416444467e-01 1.8768075934539957e-01 1.0000000000000000e+00 -7.0526315789473681e-01 4.9739612188365645e-01 -3.5079515964426300e-01 -0.0000000000000000e+00 -3.9431584623730086e+07 0.0000000000000000e+00 1.8836666274578873e-01 9.0412327752404948e-04 1.5060359477864796e-01 -4.0087339157973620e-02 2.0087749204929978e-02 -2.4650588642972794e-02 -6.1056568037803557e-02 6.5413163532273583e-02 1.0000000000000000e+00 -6.9473684210526321e-01 4.8265927977839340e-01 -3.3532118384604176e-01 -0.0000000000000000e+00 -3.9373503040717266e+07 0.0000000000000000e+00 1.7033690350785233e-01 6.5333595170886341e-02 1.2222121849978677e-01 -5.7205993379818069e-02 6.7165332648059933e-03 -1.2647530956253664e-01 1.4654443959756816e-01 -2.5926429451922754e-02 1.0000000000000000e+00 -6.8421052631578949e-01 4.6814404432132967e-01 -3.2030908295669924e-01 -0.0000000000000000e+00 -3.9311345987254620e+07 0.0000000000000000e+00 
1.2080380069246013e-01 1.6888488166618251e-01 9.1046164371871487e-03 1.4290904643907976e-03 1.1795846482993963e-01 -1.4167475525013642e-01 1.4137877394480627e-01 -1.1732256976360228e-01 1.0000000000000000e+00 -6.7368421052631577e-01 4.5385041551246535e-01 -3.0575185887155559e-01 -0.0000000000000000e+00 -3.9244973542355932e+07 0.0000000000000000e+00 3.4438283090011089e-02 2.0109121873491556e-01 -5.7547939779479883e-02 1.2132532792943161e-01 1.0628739055560422e-01 5.0316850203156537e-02 2.2046370825158240e-02 -1.7963881306520071e-01 1.0000000000000000e+00 -6.6315789473684206e-01 4.3977839335180047e-01 -2.9164251348593084e-01 -0.0000000000000000e+00 -3.9174248924560487e+07 0.0000000000000000e+00 4.0022795171004232e-02 1.0825652841681087e-01 -5.3682663711252271e-02 2.0519038176742430e-01 -1.1232513118570334e-01 1.9782604392290690e-01 -3.3256958790873037e-02 -5.2450197889903322e-02 1.0000000000000000e+00 -6.5263157894736845e-01 4.2592797783933523e-01 -2.7797404869514508e-01 -0.0000000000000000e+00 -3.9099039317975588e+07 0.0000000000000000e+00 1.2832572296168343e-01 -1.5194812833649152e-02 -2.8536438078967347e-02 2.1609125174964045e-01 -1.3020736704049873e-01 1.1099747085338199e-01 -2.9598523553562982e-02 4.9667653588594499e-02 1.0000000000000000e+00 -6.4210526315789473e-01 4.1229916897506924e-01 -2.6473946639451817e-01 -0.0000000000000000e+00 -3.9019207925871357e+07 0.0000000000000000e+00 1.8184513174486536e-01 -5.9695820017393535e-02 -1.0973501903449952e-02 1.8904657943619874e-01 3.7431734389739568e-03 -2.8005730185736164e-03 -1.4136948114045254e-02 1.3534261454652096e-02 1.0000000000000000e+00 -6.3157894736842102e-01 3.9889196675900274e-01 -2.5193176847937016e-01 -0.0000000000000000e+00 -3.8934600623322882e+07 0.0000000000000000e+00 1.8297564804199995e-01 -4.4895630844157534e-02 -3.3474510048147358e-03 1.6457432378185066e-01 7.3077581806167691e-03 -3.5012448657977931e-02 -4.8882834141059702e-03 3.1604772410185444e-02 1.0000000000000000e+00 -6.2105263157894741e-01 3.8570637119113577e-01 -2.3954395684502119e-01 -0.0000000000000000e+00 -3.8845083451900810e+07 0.0000000000000000e+00 1.5906034912211336e-01 -2.1224053174835857e-02 5.0895817281502252e-03 1.5686116396855940e-01 3.0344329995368825e-02 -2.4001065562456333e-02 -1.8903568932038005e-02 1.2354060555552715e-02 1.0000000000000000e+00 -6.1052631578947369e-01 3.7274238227146816e-01 -2.2756903338679108e-01 -0.0000000000000000e+00 -3.8750522818035603e+07 0.0000000000000000e+00 9.1006358925856679e-02 -7.5118610931840489e-03 6.6503092049194315e-02 1.5068813391684041e-01 1.5196115402105520e-01 -1.0151995927430326e-02 -1.2469031978554224e-01 -1.6259604460167863e-02 1.0000000000000000e+00 -5.9999999999999998e-01 3.5999999999999999e-01 -2.1599999999999997e-01 -0.0000000000000000e+00 -3.8650779152144141e+07 0.0000000000000000e+00 -1.6548982784842764e-02 -2.1501591298845458e-03 1.6924530473158547e-01 1.4967622644336245e-01 1.2158673178356291e-01 -3.2376002920126139e-03 -1.4152794707912439e-01 2.3518729348581235e-02 1.0000000000000000e+00 -5.8947368421052626e-01 3.4747922437673123e-01 -2.0482985857996788e-01 -0.0000000000000000e+00 -3.8545724148709796e+07 0.0000000000000000e+00 -6.9653755348263402e-02 -5.2147702654514955e-04 2.1883206296378127e-01 1.5065005938590567e-01 8.1234965378308831e-03 -8.4483521185395067e-04 4.7698190340317581e-03 -1.3036681841290393e-02 1.0000000000000000e+00 -5.7894736842105265e-01 3.3518005540166207e-01 -1.9405161102201490e-01 -0.0000000000000000e+00 -3.8435214400087699e+07 0.0000000000000000e+00 -5.1934993648663420e-02 
-1.1065424034858019e-04 2.0970028089474685e-01 1.4213240863825227e-01 -5.6572321965554090e-02 -1.8901221960316596e-04 4.4275793878134018e-02 1.2279296363450445e-02 1.0000000000000000e+00 -5.6842105263157894e-01 3.2310249307479222e-01 -1.8365825922146084e-01 -0.0000000000000000e+00 -3.8319113881397702e+07 0.0000000000000000e+00 3.6983956204560908e-02 1.1469452396336173e-03 1.7786979612228432e-01 8.4685026232228552e-02 -1.5576017970246842e-01 -3.9180483653459804e-03 1.6905921618673117e-02 1.4363154029705605e-01 1.0000000000000000e+00 -5.5789473684210522e-01 3.1124653739612185e-01 -1.7364280507362584e-01 -0.0000000000000000e+00 -3.8197280698436156e+07 0.0000000000000000e+00 1.5794822030965477e-01 3.7735217542446728e-02 1.2287402532520451e-01 -1.8335073917085422e-02 -1.5574704890232346e-01 -8.3925120824484084e-02 1.2150945715063380e-01 1.1850262633718094e-01 1.0000000000000000e+00 -5.4736842105263162e-01 2.9961218836565101e-01 -1.6399825047383004e-01 -0.0000000000000000e+00 -3.8069578593331799e+07 0.0000000000000000e+00 1.9775021803465878e-01 1.3671372568135648e-01 1.7276930071533921e-02 -5.2433983812670809e-02 4.5441320718522388e-02 -1.6094230768179973e-01 1.5277934764483300e-01 -3.8266562162837352e-02 1.0000000000000000e+00 -5.3684210526315790e-01 2.8819944598337949e-01 -1.5471759731739321e-01 -0.0000000000000000e+00 -3.7935869323744483e+07 0.0000000000000000e+00 1.0740337717159122e-01 2.1022899146327012e-01 -6.1299609340202010e-02 4.3454282349327800e-02 1.9647369631285061e-01 -4.5350830568912259e-02 4.1433286329292704e-02 -1.9276239601680384e-01 1.0000000000000000e+00 -5.2631578947368418e-01 2.7700831024930744e-01 -1.4579384749963548e-01 -0.0000000000000000e+00 -3.7796023122095928e+07 0.0000000000000000e+00 -1.4215621414284001e-02 2.1744953396946481e-01 -6.5883845005009184e-02 1.6333565624853574e-01 1.0679948462033843e-01 5.1769973659723287e-02 -3.2194092559940712e-02 -1.2551613187220623e-01 1.0000000000000000e+00 -5.1578947368421058e-01 2.6603878116343493e-01 -1.3722000291587699e-01 -0.0000000000000000e+00 -3.7649889965446725e+07 0.0000000000000000e+00 -2.1994108886333741e-02 1.8927699498646933e-01 -3.8376040914096646e-02 1.7131554407418167e-01 -8.6784250650728478e-02 1.4359723816079500e-02 -3.6947852239381268e-02 1.0971229283503742e-01 1.0000000000000000e+00 -5.0526315789473686e-01 2.5529085872576179e-01 -1.2898906546143754e-01 -0.0000000000000000e+00 -3.7497316265670031e+07 0.0000000000000000e+00 9.1811869144260094e-02 1.4695496983703263e-01 1.7729552665523007e-03 5.8767095727033349e-02 -1.9596499367135350e-01 7.7361832482246623e-02 -6.5188462472629743e-02 1.8280342218045492e-01 1.0000000000000000e+00 -4.9473684210526314e-01 2.4476454293628808e-01 -1.2109403703163725e-01 -0.0000000000000000e+00 -3.7338169264406279e+07 0.0000000000000000e+00 1.8306002678261177e-01 5.5439888209612029e-02 9.6283794815360410e-02 -3.4996668163597089e-02 -5.1804741027097784e-02 1.6594465421042104e-01 -1.6085778483598709e-01 4.6511627709091036e-02 1.0000000000000000e+00 -4.8421052631578948e-01 2.3445983379501387e-01 -1.1352791952179618e-01 -0.0000000000000000e+00 -3.7172326619055934e+07 0.0000000000000000e+00 1.4324579548557007e-01 -4.3540479342225637e-02 1.9285128574686822e-01 8.1291219084947131e-03 1.6599096722370082e-01 8.1719954474456569e-02 -1.0011559455168681e-01 -1.4673609329855580e-01 1.0000000000000000e+00 -4.7368421052631576e-01 2.2437673130193903e-01 -1.0628371482723427e-01 -0.0000000000000000e+00 -3.6999660033267289e+07 0.0000000000000000e+00 1.7927959210752555e-02 -7.0631859780513950e-02 
2.1999075710869140e-01 1.3293553272129058e-01 1.5227812176683891e-01 -1.6871688629898421e-02 3.9865563316927824e-02 -1.7493208269286115e-01 1.0000000000000000e+00 -4.6315789473684210e-01 2.1451523545706372e-01 -9.9354424843271616e-02 -0.0000000000000000e+00 -3.6820030059284538e+07 0.0000000000000000e+00 -5.4721786079557348e-02 -4.8236631544414314e-02 1.9882222644191866e-01 2.0344308115693138e-01 2.6089110109824766e-02 -3.9887978142612079e-02 2.6738451663599842e-02 -1.3927785112094214e-02 1.0000000000000000e+00 -4.5263157894736844e-01 2.0487534626038784e-01 -9.2733051465228172e-02 -0.0000000000000000e+00 -3.6633274691983856e+07 0.0000000000000000e+00 -5.2939077440154893e-02 -2.2077204420055511e-02 1.7014165230529651e-01 2.0466167119890102e-01 -3.2091590291564463e-02 -2.5353413172512607e-02 2.0062332963361327e-02 3.7176426557142955e-02 1.0000000000000000e+00 -4.4210526315789472e-01 1.9545706371191135e-01 -8.6412596588423957e-02 -0.0000000000000000e+00 -3.6439258355741180e+07 0.0000000000000000e+00 -2.8368684258698883e-02 -6.5326696738188961e-03 1.5907556382876520e-01 1.7651151390245995e-01 -2.9318585685865400e-02 -1.4349982160473887e-02 2.9724200146709720e-02 1.4803601547544343e-02 1.0000000000000000e+00 -4.3157894736842106e-01 1.8626038781163437e-01 -8.0386062108179043e-02 -0.0000000000000000e+00 -3.6237849955159605e+07 0.0000000000000000e+00 -1.0939995492063020e-02 3.5551552001175248e-02 1.5296722297617443e-01 1.2264360977493392e-01 -1.4078391306374482e-02 -8.7221277924167465e-02 -1.9044411797657257e-02 1.2068399478920640e-01 1.0000000000000000e+00 -4.2105263157894735e-01 1.7728531855955676e-01 -7.4646449919813368e-02 -0.0000000000000000e+00 -3.6028915291220829e+07 0.0000000000000000e+00 -3.3414050335288385e-03 1.3618602296187249e-01 1.3156598803018277e-01 3.4896284016351968e-02 -4.8773371649446359e-03 -1.6179738022522952e-01 5.8664228336120644e-02 1.0702228757277181e-01 1.0000000000000000e+00 -4.1052631578947368e-01 1.6853185595567868e-01 -6.9186761918647033e-02 -0.0000000000000000e+00 -3.5812314029410847e+07 0.0000000000000000e+00 5.0906709943765328e-03 2.0417342571709904e-01 5.0401278513766204e-02 4.0121666418745344e-02 -1.8901535823182544e-02 -2.7992687684244611e-02 1.5884528688943000e-01 -1.1215730732557562e-01 1.0000000000000000e+00 -4.0000000000000002e-01 1.6000000000000003e-01 -6.4000000000000015e-02 -0.0000000000000000e+00 -3.5587902398848765e+07 0.0000000000000000e+00 6.6503092049194315e-02 1.5190460181838772e-01 -4.4898761562050013e-02 1.2717679149317535e-01 -1.2469031978554224e-01 1.7222491478578517e-01 7.9617634403327781e-02 -1.2629299555565593e-01 1.0000000000000000e+00 -3.8947368421052631e-01 1.5168975069252078e-01 -5.9079166059192299e-02 -0.0000000000000000e+00 -3.5355541558413237e+07 0.0000000000000000e+00 1.6924530473158547e-01 5.7813335883083829e-02 -7.0945854760163657e-02 1.4410960340571496e-01 -1.4152794707912439e-01 7.1590601234367446e-02 -1.7390445637262820e-02 8.7667705243026917e-02 1.0000000000000000e+00 -3.7894736842105264e-01 1.4360110803324100e-01 -5.4417261991543966e-02 -0.0000000000000000e+00 -3.5115099058204055e+07 0.0000000000000000e+00 2.1883206296378127e-01 8.2513416628344272e-02 -4.8300691699543583e-02 4.6262102082296433e-02 4.7698190340317581e-03 -1.3400836236012101e-01 -3.9999015100023447e-02 1.6824935694483101e-01 1.0000000000000000e+00 -3.6842105263157893e-01 1.3573407202216065e-01 -5.0007289692374973e-02 -0.0000000000000000e+00 -3.4866436158003025e+07 0.0000000000000000e+00 2.0970028089474685e-01 1.5740056826346380e-01 -2.2088987412662978e-02 
-4.5224820101560564e-02 4.4275793878134018e-02 -7.7253408640873558e-02 -2.5374601531988065e-02 5.8145972351154811e-02 1.0000000000000000e+00 -3.5789473684210527e-01 1.2808864265927977e-01 -4.5842251057005394e-02 -0.0000000000000000e+00 -3.4609428135132596e+07 0.0000000000000000e+00 1.7903777288316586e-01 1.8910188123201388e-01 -7.7026326635749130e-03 -5.9751297652897432e-02 1.3025265679763930e-02 2.2488780400294686e-02 -1.0473003102067882e-02 -2.4181809130075956e-02 1.0000000000000000e+00 -3.4736842105263160e-01 1.2066481994459835e-01 -4.1915147980755220e-02 -0.0000000000000000e+00 -3.4343941110639565e+07 0.0000000000000000e+00 1.6061288779989019e-01 1.7842422385602436e-01 -2.1876212702140792e-03 -3.6627101125479865e-02 3.7591023000768276e-02 -1.8686169739423706e-05 -3.3034327947330652e-03 -3.3928990275288611e-02 1.0000000000000000e+00 -3.3684210526315789e-01 1.1346260387811634e-01 -3.8218982358944449e-02 -0.0000000000000000e+00 -3.4069813706143320e+07 0.0000000000000000e+00 1.5399106008616245e-01 1.6129738323672316e-01 -5.2792733110310866e-04 -1.5453626016904153e-02 -8.1611523566562372e-03 2.6840279882962656e-02 -8.5758914132577559e-04 -1.8809739866262339e-02 1.0000000000000000e+00 -3.2631578947368423e-01 1.0648199445983381e-01 -3.4746756086893135e-02 -0.0000000000000000e+00 -3.3786908936772466e+07 0.0000000000000000e+00 1.4298555988347192e-01 1.5601979571594721e-01 5.8320787330212983e-03 -5.0503926884532951e-03 1.3631643973506719e-02 1.1022901304971910e-02 -1.7740233541584896e-02 -7.1205556804665180e-03 1.0000000000000000e+00 -3.1578947368421051e-01 9.9722991689750684e-02 -3.1491471059921269e-02 -0.0000000000000000e+00 -3.3495079538397674e+07 0.0000000000000000e+00 8.6041788334626473e-02 1.4933335804331696e-01 6.6670845869462786e-02 -1.3602684486988478e-03 1.4006821465228125e-01 -1.2692601934889872e-02 -1.2441038191784466e-01 -2.1059969516319570e-03 1.0000000000000000e+00 -3.0526315789473685e-01 9.3185595567867041e-02 -2.8446129173348884e-02 -0.0000000000000000e+00 -3.3194207260061242e+07 0.0000000000000000e+00 1.9357360471847010e-02 1.1190052342175456e-01 1.6927881114297239e-01 -3.1430577635336443e-04 3.4956141023667744e-02 1.0737250903658854e-01 -1.4146939027145361e-01 -5.1934602779551791e-04 1.0000000000000000e+00 -2.9473684210526313e-01 8.6869806094182808e-02 -2.5603732322495985e-02 -0.0000000000000000e+00 -3.2884149286440846e+07 0.0000000000000000e+00 6.6603362128221322e-02 1.3929299345066239e-02 2.1883810893506717e-01 -6.3880433476327442e-05 -1.5354899403779046e-01 1.4789177314831253e-01 4.7807652831930933e-03 -1.1174587499686113e-04 1.0000000000000000e+00 -2.8421052631578947e-01 8.0775623268698055e-02 -2.2957282402682605e-02 -0.0000000000000000e+00 -3.2564763542401820e+07 0.0000000000000000e+00 1.4630748208767322e-01 -6.2153849851647971e-02 2.0970137016097318e-01 5.9320392469887186e-03 -6.6992599968341504e-02 4.0078905610380966e-02 4.4277826986989482e-02 -1.7570376572601729e-02 1.0000000000000000e+00 -2.7368421052631581e-01 7.4903047091412753e-02 -2.0499781309228755e-02 -0.0000000000000000e+00 -3.2235910996290851e+07 0.0000000000000000e+00 1.2219866686111240e-01 -6.6072630346525565e-02 1.7786979612228432e-01 6.6689891161836240e-02 1.4084140145522778e-01 -3.2511422854075087e-02 1.6905921618673117e-02 -1.2437666637191104e-01 1.0000000000000000e+00 -2.6315789473684209e-01 6.9252077562326861e-02 -1.8224230937454435e-02 -0.0000000000000000e+00 -3.1897453852406133e+07 0.0000000000000000e+00 4.6562990343610049e-02 -3.8496771687101639e-02 1.2287402532520451e-01 1.6928214527850768e-01 
5.7000139595247135e-02 -3.6706390367607596e-02 1.2150945715063380e-01 -1.4146329261726617e-01 1.0000000000000000e+00 -2.5263157894736843e-01 6.3822714681440448e-02 -1.6123633182679693e-02 -0.0000000000000000e+00 -3.1549256225830898e+07 0.0000000000000000e+00 7.9108176051305282e-02 -1.5916909137952982e-02 1.7276930071533921e-02 2.1883869298999215e-01 -1.3899421988458102e-01 -1.9555193287451748e-02 1.5277934764483300e-01 4.7818640459180925e-03 1.0000000000000000e+00 -2.4210526315789474e-01 5.8614958448753467e-02 -1.4190989940224523e-02 -0.0000000000000000e+00 -3.1191183393587172e+07 0.0000000000000000e+00 1.5059181178604050e-01 7.9346903717546994e-04 -6.1299609340202010e-02 2.0970137016097318e-01 -6.1077756397279012e-02 -2.4839600862575958e-02 4.1433286329292704e-02 4.4277826986989482e-02 1.0000000000000000e+00 -2.3157894736842105e-01 5.3628808864265930e-02 -1.2419303105408952e-02 -0.0000000000000000e+00 -3.0823097233739205e+07 0.0000000000000000e+00 1.2338720903179384e-01 6.5312563649638417e-02 -6.5883845005009184e-02 1.7786979612228432e-01 1.4266010677815580e-01 -1.2651270198897344e-01 -3.2194092559940712e-02 1.6905921618673117e-02 1.0000000000000000e+00 -2.2105263157894736e-01 4.8864265927977837e-02 -1.0801574573552995e-02 -0.0000000000000000e+00 -3.0444855215506077e+07 0.0000000000000000e+00 4.6843168115169163e-02 1.6896481616332268e-01 -3.8459620343475762e-02 1.2287402532520451e-01 5.7459750774509627e-02 -1.4198814727881801e-01 -3.6641146885318263e-02 1.2150945715063380e-01 1.0000000000000000e+00 -2.1052631578947367e-01 4.4321329639889190e-02 -9.3308062399766710e-03 -0.0000000000000000e+00 -3.0056331771375515e+07 0.0000000000000000e+00 7.9166369956801597e-02 2.1877404877993789e-01 -1.5910458833395022e-02 1.7276930071533921e-02 -1.3889483809391651e-01 4.6697283257817260e-03 -1.9542439357979925e-02 1.5277934764483300e-01 1.0000000000000000e+00 -2.0000000000000001e-01 4.0000000000000008e-02 -8.0000000000000019e-03 -0.0000000000000000e+00 -2.9657376031923760e+07 0.0000000000000000e+00 1.5654632775201785e-01 2.0968958716836569e-01 -5.1492639361944087e-03 -6.1299609340202010e-02 -7.8607789359785296e-02 4.4256638627514028e-02 -7.2883795405942276e-03 4.1433286329292704e-02 1.0000000000000000e+00 -1.8947368421052632e-01 3.5900277008310250e-02 -6.8021577489429958e-03 -0.0000000000000000e+00 -2.9247849788508385e+07 0.0000000000000000e+00 1.8891309589049748e-01 1.7786780989340983e-01 -1.3793137410722976e-03 -6.4715868244127644e-02 2.2171450106160298e-02 1.6902244738169939e-02 -2.1397124975655688e-03 -3.6074748498849903e-02 1.0000000000000000e+00 -1.7894736842105263e-01 3.2022160664819943e-02 -5.7302813821256742e-03 -0.0000000000000000e+00 -2.8827620597817719e+07 0.0000000000000000e+00 1.7830349308301938e-01 1.2295729395787997e-01 -3.1763991188864764e-04 -7.2075786879009091e-04 2.2277570203424468e-04 1.2120216277613968e-01 -5.2544368198296078e-04 -1.2055958103518379e-01 1.0000000000000000e+00 -1.6842105263157894e-01 2.8365650969529085e-02 -4.7773727948680561e-03 -0.0000000000000000e+00 -2.8396549759912662e+07 0.0000000000000000e+00 1.4360733911056495e-01 3.4960344171481243e-02 -6.4464488401335120e-05 1.2080367118123352e-01 7.2474257985726140e-02 1.0713332453018319e-01 -1.1284463772185742e-04 -1.8048293935946916e-01 1.0000000000000000e+00 -1.5789473684210525e-01 2.4930747922437671e-02 -3.9364338824901587e-03 -0.0000000000000000e+00 -2.7954504561053500e+07 0.0000000000000000e+00 5.4585647698166068e-02 4.0133449411352815e-02 5.9320392469887186e-03 1.9913590528747951e-01 1.6459027349150931e-01 
-1.1213611896610017e-01 -1.7570376572601729e-02 -3.5090021896380212e-02 1.0000000000000000e+00 -1.4736842105263157e-01 2.1717451523545702e-02 -3.2004665403119982e-03 -0.0000000000000000e+00 -2.7501342752012245e+07 0.0000000000000000e+00 -4.3729264683742025e-02 1.2717877772204980e-01 6.6689891161836240e-02 1.5054631959856335e-01 8.1402624180322181e-02 -1.2628931867515275e-01 -1.2437666637191104e-01 1.7012259471465641e-01 1.0000000000000000e+00 -1.3684210526315790e-01 1.8725761772853188e-02 -2.5624726636535944e-03 -0.0000000000000000e+00 -2.7036898926030204e+07 0.0000000000000000e+00 -7.0585431694760711e-02 1.4410991420241864e-01 1.6919856584912857e-01 5.7499340903434129e-02 -1.7243637466250763e-02 8.7668294263458041e-02 -1.4115658726320315e-01 7.1071844227003050e-02 1.0000000000000000e+00 -1.2631578947368421e-01 1.5955678670360112e-02 -2.0154541478349616e-03 -0.0000000000000000e+00 -2.6561044027604885e+07 0.0000000000000000e+00 -3.0559847470677894e-02 4.6262281803949382e-02 2.0115527889004484e-01 8.2449176751562048e-02 -8.5546046269148218e-02 1.6824864802724551e-01 5.0427887160567904e-02 -1.3411869039994689e-01 1.0000000000000000e+00 -1.1578947368421053e-01 1.3407202216066482e-02 -1.5524128881761190e-03 -0.0000000000000000e+00 -2.6073662987834934e+07 0.0000000000000000e+00 7.9354765065273003e-02 -3.9280997861964380e-02 1.0826831140941834e-01 1.5144496303126015e-01 -1.7892485157676094e-01 4.0596784138028544e-02 1.9784723228238235e-01 -5.9725408787222739e-02 1.0000000000000000e+00 -1.0526315789473684e-01 1.1080332409972297e-02 -1.1663507799970839e-03 -0.0000000000000000e+00 -2.5574620168755420e+07 0.0000000000000000e+00 1.8536197629235854e-01 6.9405797378132780e-03 -1.4024849843893137e-02 1.2240801761242869e-01 -1.0456455233677675e-01 -1.4855479862148382e-01 1.0712049179497599e-01 1.4685809301119937e-01 1.0000000000000000e+00 -9.4736842105263161e-02 8.9750692520775624e-03 -8.5026971861787448e-04 -0.0000000000000000e+00 -2.5063768014097415e+07 0.0000000000000000e+00 1.8046580350176308e-01 1.3257177552035235e-01 -2.1956646746004206e-02 9.1414569841093644e-03 1.2069989202041136e-01 -1.7508498851806065e-01 -8.6718418148008028e-02 1.4144342840666449e-01 1.0000000000000000e+00 -8.4210526315789472e-02 7.0914127423822712e-03 -5.9717159935850702e-04 -0.0000000000000000e+00 -2.4540960104356453e+07 0.0000000000000000e+00 7.9327867962882714e-02 1.8570165287314069e-01 9.1818499170470996e-02 -5.7541130031616032e-02 1.4128889296442082e-01 3.1618147294305565e-02 -1.9595294865946716e-01 2.2057706919459078e-02 1.0000000000000000e+00 -7.3684210526315783e-02 5.4293628808864255e-03 -4.0005831753899977e-04 -0.0000000000000000e+00 -2.4006064559022680e+07 0.0000000000000000e+00 6.1245759079613778e-02 1.0321791872096504e-01 1.8306111604883807e-01 -4.7737752205429779e-02 -8.8326098732102454e-02 1.9072667660191583e-01 -5.1802707918242319e-02 -5.0804113895143846e-02 1.0000000000000000e+00 -6.3157894736842107e-02 3.9889196675900280e-03 -2.5193176847937020e-04 -0.0000000000000000e+00 -2.3458940367557507e+07 0.0000000000000000e+00 1.3583758405486746e-01 -1.6553095053473525e-02 1.4324579548557007e-01 3.8155439311743367e-02 -1.2005537111306841e-01 1.0889515078225322e-01 1.6599096722370082e-01 -1.5397151304497084e-01 1.0000000000000000e+00 -5.2631578947368418e-02 2.7700831024930744e-03 -1.4579384749963548e-04 -0.0000000000000000e+00 -2.2899451906451862e+07 0.0000000000000000e+00 1.8399529087474989e-01 -6.0009814997043241e-02 1.7927959210752555e-02 1.5830895417176138e-01 6.9807737309865725e-03 -3.3193300259380166e-03 
1.5227812176683891e-01 -1.5559965171088030e-01 1.0000000000000000e+00 -4.2105263157894736e-02 1.7728531855955678e-03 -7.4646449919813377e-05 -0.0000000000000000e+00 -2.2327463736216143e+07 0.0000000000000000e+00 1.8349712506854510e-01 -4.4959690999286810e-02 -5.4721606357904400e-02 2.1549106226352449e-01 8.1525933924707084e-03 -3.5123485615389298e-02 2.6088401192239274e-02 -1.0571045060238371e-04 1.0000000000000000e+00 -3.1578947368421054e-02 9.9722991689750701e-04 -3.1491471059921275e-05 -0.0000000000000000e+00 -2.1742838455218170e+07 0.0000000000000000e+00 1.5917100336246193e-01 -2.1235836167443324e-02 -4.6995255200558708e-02 2.0884712964952720e-01 3.0533342214971990e-02 -2.4022253921931792e-02 -4.9640778504690730e-02 4.2923446268077758e-02 1.0000000000000000e+00 -2.1052631578947368e-02 4.4321329639889195e-04 -9.3308062399766721e-06 -0.0000000000000000e+00 -2.1145430968515009e+07 0.0000000000000000e+00 9.1027390447104617e-02 -7.5138473220585237e-03 3.8323193132011824e-02 1.7884898754164946e-01 1.5199854644749200e-01 -1.0155672807933508e-02 -1.5369157517727328e-01 1.2707935385629556e-02 1.0000000000000000e+00 -1.0526315789473684e-02 1.1080332409972299e-04 -1.1663507799970840e-06 -0.0000000000000000e+00 -2.0535093915450227e+07 0.0000000000000000e+00 -1.6461758423224705e-02 -2.1504699265882049e-03 1.5825888115376921e-01 1.6057573645626430e-01 1.2128671310411847e-01 -3.2381893124437317e-03 -1.5523438954914651e-01 3.7525779518478949e-02 1.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 0.0000000000000000e+00 -1.9911701609588567e+07 0.0000000000000000e+00 -5.1969936915044021e-02 -5.2147702654514955e-04 1.9781369413486305e-01 1.5398460978160450e-01 -3.7520718896508438e-02 -8.4483521185395067e-04 4.5551258913208763e-02 -8.1739062861280725e-03 1.0000000000000000e+00 1.0526315789473684e-02 1.1080332409972299e-04 1.1663507799970840e-06 1.1663507799970840e-06 -1.9275125611193918e+07 0.0000000000000000e+00 4.3554242863295220e-02 -1.1065424034858019e-04 1.0741516016419869e-01 1.4892829285684181e-01 -1.9259253904782070e-01 -1.8901221960316596e-04 1.9649488467232606e-01 -3.9195773484750167e-03 1.0000000000000000e+00 2.1052631578947368e-02 4.4321329639889195e-04 9.3308062399766721e-06 9.3308062399766721e-06 -1.8625232264776390e+07 0.0000000000000000e+00 1.6335470154090917e-01 1.1469452396336173e-03 -1.5381611946291066e-02 1.5156568896445566e-01 -1.2548241632627261e-01 -3.9180483653459804e-03 1.1068381743975080e-01 1.9575881099782574e-02 1.0000000000000000e+00 3.1578947368421054e-02 9.9722991689750701e-04 3.1491471059921275e-05 3.1491471059921275e-05 -1.7961888448384833e+07 0.0000000000000000e+00 1.7131887820971695e-01 3.7735217542446728e-02 -5.9649081134936638e-02 1.5081737464299355e-01 1.0971839048922487e-01 -8.3925120824484084e-02 -3.1719328344948406e-03 -2.2281423069238762e-02 1.0000000000000000e+00 4.2105263157894736e-02 1.7728531855955678e-03 7.4646449919813377e-05 7.4646449919813377e-05 -1.7284953087222628e+07 0.0000000000000000e+00 5.8767500060305407e-02 1.3671354595970353e-01 -2.7218846770421114e-02 1.3104469072529057e-01 1.8280522986076542e-01 -1.6094159876421424e-01 -8.0670516784514076e-02 5.7818684206681195e-02 1.0000000000000000e+00 5.2631578947368418e-02 2.7700831024930744e-03 1.4579384749963548e-04 1.4579384749963548e-04 -1.6594301290707462e+07 0.0000000000000000e+00 -4.0940490403193273e-02 2.0428516922367393e-01 8.0207916310492663e-02 5.6234446513013805e-02 6.4060815922217310e-02 -2.7801642355785985e-02 -1.7757250396670465e-01 
1.4110708645670056e-01 1.0000000000000000e+00 6.3157894736842107e-02 3.9889196675900280e-03 2.5193176847937020e-04 2.5193176847937020e-04 -1.5889801689018261e+07 0.0000000000000000e+00 -5.8562755482215997e-02 1.5192563333963566e-01 1.8555076163387493e-01 2.1772084307412773e-02 -2.2363103807147941e-02 1.7226230721222197e-01 -1.0424722204264238e-01 -4.4792747514516867e-02 1.0000000000000000e+00 7.3684210526315783e-02 5.4293628808864255e-03 4.0005831753899977e-04 4.0005831753899977e-04 -1.5171332894279920e+07 0.0000000000000000e+00 -3.6346923353920750e-02 5.7816980815322772e-02 1.8041937541600986e-01 9.8332956382808728e-02 -3.3469379096026097e-02 7.1597287908986007e-02 1.2107184085676369e-01 -1.5885983590871644e-01 1.0000000000000000e+00 8.4210526315789472e-02 7.0914127423822712e-03 5.9717159935850702e-04 5.9717159935850702e-04 -1.4438759648172289e+07 0.0000000000000000e+00 -1.5395611833060778e-02 8.2513820961616330e-02 6.1651263610799256e-02 1.7053741723552357e-01 -1.8709649158012306e-02 -1.3400655467981051e-01 1.8694625217337149e-01 -3.5218249816830346e-02 1.0000000000000000e+00 9.4736842105263161e-02 8.9750692520775624e-03 8.5026971861787448e-04 8.5026971861787448e-04 -1.3691952932217911e+07 0.0000000000000000e+00 -5.0396989620721359e-03 1.5145674602386761e-01 -3.4242388166118555e-02 1.8761238274831019e-01 -7.1014004298465220e-03 -5.9704220427747284e-02 4.7696151459019592e-02 1.8903225455001421e-02 1.0000000000000000e+00 1.0526315789473684e-01 1.1080332409972297e-02 1.1663507799970839e-03 1.1663507799970839e-03 -1.2930769741141085e+07 0.0000000000000000e+00 -1.3582822198243730e-03 1.2241000384130316e-01 8.2988619576376543e-03 1.7133514021959093e-01 -2.1023200711287751e-03 1.4686176989170255e-01 -1.4645247855035504e-01 2.5522625776960533e-03 1.0000000000000000e+00 1.1578947368421053e-01 1.3407202216066482e-02 1.5524128881761190e-03 1.5524128881761190e-03 -1.2155081930344526e+07 0.0000000000000000e+00 -3.1399497964970549e-04 9.1417677808130239e-03 1.3296934992938117e-01 1.5842526652967609e-01 -5.1875700736440022e-04 1.4144401742709561e-01 -1.7487293686475924e-01 3.4287590206035223e-02 1.0000000000000000e+00 1.2631578947368421e-01 1.5955678670360112e-02 2.0154541478349616e-03 2.0154541478349616e-03 -1.1364771600352474e+07 0.0000000000000000e+00 -6.4060155129273298e-05 -5.7541309753268981e-02 2.0344912712821728e-01 1.5346313275505935e-01 -1.1103695741136719e-04 2.2058415837044570e-02 -1.3916838862932879e-02 -9.0187414979820118e-03 1.0000000000000000e+00 1.3684210526315790e-01 1.8725761772853188e-02 2.5624726636535944e-03 2.5624726636535944e-03 -1.0559703577307537e+07 0.0000000000000000e+00 -1.1782992607466748e-05 -5.3681574445025963e-02 2.0466276046512732e-01 1.4881763861649322e-01 -2.1188359475456707e-05 -3.3254925682017572e-02 3.7178459665998420e-02 -4.1085895680781741e-03 1.0000000000000000e+00 1.4736842105263157e-01 2.1717451523545702e-02 3.2004665403119982e-03 3.2004665403119982e-03 -9.7397521797204204e+06 0.0000000000000000e+00 1.1659905320070671e-03 -2.8536438078967347e-02 1.7651151390245995e-01 1.5154465744320772e-01 -3.8843328194123690e-03 -2.9598523553562982e-02 1.4803601547544343e-02 1.9538488673345775e-02 1.0000000000000000e+00 1.5789473684210525e-01 2.4930747922437671e-02 3.9364338824901587e-03 3.9364338824901587e-03 -8.9047886006812230e+06 0.0000000000000000e+00 3.7738551677982013e-02 -1.0973501903449952e-02 1.2264360977493392e-01 1.5081372971075463e-01 -8.3919023170296647e-02 -1.4136948114045254e-02 1.2068399478920640e-01 -2.2288109743857323e-02 1.0000000000000000e+00 
1.6842105263157894e-01 2.8365650969529085e-02 4.7773727948680561e-03 4.7773727948680561e-03 -8.0546743582526855e+06 0.0000000000000000e+00 1.3671413001462854e-01 -3.3474510048147358e-03 3.4896284016351968e-02 1.3104392694871261e-01 -1.6094050000148924e-01 -4.8882834141059702e-03 1.0702228757277181e-01 5.7818294361541699e-02 1.0000000000000000e+00 1.7894736842105263e-01 3.2022160664819943e-02 5.7302813821256742e-03 5.7302813821256742e-03 -7.1892840764447525e+06 0.0000000000000000e+00 2.0428516922367393e-01 5.0895817281502252e-03 4.0121666418745344e-02 5.0290624273417621e-02 -2.7801642355785985e-02 -1.8903568932038005e-02 -1.1215730732557562e-01 1.5865627466982682e-01 1.0000000000000000e+00 1.8947368421052632e-01 3.5900277008310250e-02 6.8021577489429958e-03 6.8021577489429958e-03 -6.3084860786178336e+06 0.0000000000000000e+00 1.5075765657875412e-01 6.6503092049194315e-02 1.2717679149317535e-01 -4.3751816322416398e-02 1.7614296315113115e-01 -1.2469031978554224e-01 -1.2629299555565593e-01 7.5699586037981811e-02 1.0000000000000000e+00 2.0000000000000001e-01 4.0000000000000008e-02 8.0000000000000019e-03 8.0000000000000019e-03 -5.4121575398616791e+06 0.0000000000000000e+00 2.0078118340637101e-02 1.6916172530220636e-01 1.4419318283509408e-01 -3.3210637217716936e-02 1.5551572205885153e-01 -1.4122124172506137e-01 8.7360999888963911e-02 -1.0131556646174690e-01 1.0000000000000000e+00 2.1052631578947367e-01 4.4321329639889190e-02 9.3308062399766710e-03 9.3308062399766710e-03 -4.5001672449266911e+06 0.0000000000000000e+00 -5.4200309053012201e-02 2.0114882858548688e-01 6.3945516182243756e-02 8.8412854260159948e-02 2.6933945321678716e-02 5.0415133231096074e-02 1.2260333383018120e-01 -2.0094061386423767e-01 1.0000000000000000e+00 2.2105263157894736e-01 4.8864265927977837e-02 1.0801574573552995e-02 1.0801574573552995e-02 -3.5723705129204541e+06 0.0000000000000000e+00 -5.2828423199806310e-02 1.1421104438278823e-01 5.6208238649994260e-02 1.8219618181101094e-01 -3.1902578071961299e-02 1.8029601096040063e-01 -9.5423432944238051e-02 -5.3176243887774047e-02 1.0000000000000000e+00 2.3157894736842105e-01 5.3628808864265930e-02 1.2419303105408952e-02 1.2419303105408952e-02 -2.6286213762276992e+06 0.0000000000000000e+00 -2.8347652737450959e-02 5.1499050785936029e-02 1.3331132507416155e-01 1.4422300067606075e-01 -2.9281193259428608e-02 -1.3371841757522693e-02 -1.1827703524528801e-01 1.6178930411015408e-01 1.0000000000000000e+00 2.4210526315789474e-01 5.8614958448753467e-02 1.4190989940224523e-02 1.4190989940224523e-02 -1.6688142544999197e+06 0.0000000000000000e+00 -1.0936350559824077e-02 1.0950336742514234e-01 1.4602601284979363e-01 5.5629359545108691e-02 -1.4071704631755921e-02 -1.4395598224091452e-01 9.0073745519424694e-02 6.8293855114252955e-02 1.0000000000000000e+00 2.5263157894736843e-01 6.3822714681440448e-02 1.6123633182679693e-02 1.6123633182679693e-02 -6.9281623238137364e+05 0.0000000000000000e+00 -3.3410007002567768e-03 1.5625982776754024e-01 6.4402348998734621e-02 8.1985713908860280e-02 -4.8755294846341453e-03 1.5414729585004479e-02 1.2333603332189878e-01 -1.3486343490355079e-01 1.0000000000000000e+00 2.6315789473684209e-01 6.9252077562326861e-02 1.8224230937454435e-02 1.8224230937454435e-02 2.9950556338893622e+05 0.0000000000000000e+00 -8.5315124521965287e-04 9.2988080474178669e-02 5.6307109897735373e-02 1.5134500251729271e-01 -1.3523476100562744e-03 1.5629697850679977e-01 -9.5255609084110349e-02 -5.9895265756205913e-02 1.0000000000000000e+00 2.7368421052631581e-01 7.4903047091412753e-02 
2.0499781309228755e-02 2.0499781309228755e-02 1.3082802235329971e+06 0.0000000000000000e+00 -1.8878534151638908e-04 4.3987189692751984e-02 1.3449834712741654e-01 1.2238897232005523e-01 -3.1733029413437370e-04 -2.3523837684953017e-02 -1.2212397563826358e-01 1.4682437746526575e-01 1.0000000000000000e+00 2.8421052631578947e-01 8.0775623268698055e-02 2.2957282402682605e-02 2.2957282402682605e-02 2.3336263527088910e+06 0.0000000000000000e+00 -3.7151343625874653e-05 1.0743678772463691e-01 1.8368463003063548e-01 9.1381228485740811e-03 -6.5243482289333469e-05 -1.4750028788699016e-01 6.4681143778096140e-03 1.4143733075247705e-01 1.0000000000000000e+00 2.9473684210526313e-01 8.6869806094182808e-02 2.5603732322495985e-02 2.5603732322495985e-02 3.3756763413345665e+06 0.0000000000000000e+00 -6.4503045579590711e-06 1.7342158511928946e-01 1.8343364896834086e-01 -5.7541893808193988e-02 -1.2753929471824879e-05 -3.1075419823913791e-02 8.0426551977843541e-03 2.2057317074319575e-02 1.0000000000000000e+00 3.0526315789473685e-01 9.3185595567867041e-02 2.8446129173348884e-02 2.8446129173348884e-02 4.4345758976545855e+06 0.0000000000000000e+00 5.9427329733698778e-03 1.8836666274578873e-01 1.5915922036985447e-01 -5.3681574445025963e-02 -1.7551221321981732e-02 2.0087749204929978e-02 3.0512153855496535e-02 -3.3254925682017572e-02 1.0000000000000000e+00 3.1578947368421051e-01 9.9722991689750684e-02 3.1491471059921269e-02 3.1491471059921269e-02 5.5104500529150888e+06 0.0000000000000000e+00 6.6691877390710710e-02 1.7033690350785233e-01 9.2193380979111672e-02 -2.8536438078967347e-02 -1.2437298949140786e-01 6.7165332648059933e-03 1.4811421362807964e-01 -2.9598523553562982e-02 1.0000000000000000e+00 3.2631578947368423e-01 1.0648199445983381e-01 3.4746756086893135e-02 3.4746756086893135e-02 6.6034118503785804e+06 0.0000000000000000e+00 1.6928245607521133e-01 1.2072022126308102e-01 2.1193213825378192e-02 -1.0973501903449952e-02 -1.4146270359683505e-01 1.1826517018400264e-01 3.7674395287884840e-02 -1.4136948114045254e-02 1.0000000000000000e+00 3.3684210526315789e-01 1.1346260387811634e-01 3.8218982358944449e-02 3.8218982358944449e-02 7.7135919904695749e+06 0.0000000000000000e+00 2.1883869298999215e-01 1.6754868990063766e-02 6.7060778999637194e-02 -3.3474510048147358e-03 4.7818640459180925e-03 1.5193341367025404e-01 -1.5281519578334787e-01 -4.8882834141059702e-03 1.0000000000000000e+00 3.4736842105263160e-01 1.2066481994459835e-01 4.1915147980755220e-02 4.1915147980755220e-02 8.8411533235496357e+06 0.0000000000000000e+00 2.0970137016097318e-01 -6.1410263580550593e-02 1.4640635333541432e-01 5.0895817281502252e-03 4.4277826986989482e-02 4.1244274109689540e-02 -6.6824776108213801e-02 -1.8903568932038005e-02 1.0000000000000000e+00 3.5789473684210527e-01 1.2808864265927977e-01 4.5842251057005394e-02 4.5842251057005394e-02 9.9862234019981027e+06 0.0000000000000000e+00 1.7903777288316586e-01 -6.5904876526257108e-02 1.2104973539260432e-01 6.6503092049194315e-02 1.3025265679763930e-02 -3.2231484986377505e-02 1.4475577294007058e-01 -1.2469031978554224e-01 1.0000000000000000e+00 3.6842105263157893e-01 1.3573407202216065e-01 5.0007289692374973e-02 5.0007289692374973e-02 1.1148932302210726e+07 0.0000000000000000e+00 1.6061288779989019e-01 -3.8379685846335589e-02 8.8274620044596595e-03 1.6916172530220636e-01 3.7591023000768276e-02 -3.6954538913999829e-02 1.4092467139930009e-01 -1.4122124172506137e-01 1.0000000000000000e+00 3.7894736842105264e-01 1.4360110803324100e-01 5.4417261991543966e-02 5.4417261991543966e-02 1.2329372176345021e+07 
0.0000000000000000e+00 1.5399123980781540e-01 1.7723712116272938e-03 -5.7605369908398249e-02 2.0114864886383393e-01 -8.1618612742417312e-03 -6.5189561235354729e-02 2.1947378879633203e-02 5.0415842148681569e-02 1.0000000000000000e+00 3.8947368421052631e-01 1.5168975069252078e-01 5.9079166059192299e-02 5.9079166059192299e-02 1.3527680857389219e+07 0.0000000000000000e+00 1.4892938212306811e-01 9.6283794815360410e-02 -5.3693357437633427e-02 1.0826722214319204e-01 -3.9175442396195519e-03 -1.6085778483598709e-01 -3.3276114041493027e-02 1.9784519917352689e-01 1.0000000000000000e+00 4.0000000000000002e-01 1.6000000000000003e-01 6.4000000000000015e-02 6.4000000000000015e-02 1.4744013954299904e+07 0.0000000000000000e+00 1.5156568896445566e-01 1.9168330898598668e-01 -2.7370447546960278e-02 -1.5192826604774678e-02 1.9575881099782574e-02 -9.6234938612777626e-02 -3.3482856372975350e-02 1.1100114773388517e-01 1.0000000000000000e+00 4.1052631578947368e-01 1.6853185595567868e-01 6.9186761918647033e-02 6.9186761918647033e-02 1.5978502905917309e+07 0.0000000000000000e+00 1.5081737464299355e-01 1.8225189463400573e-01 2.6765049774532063e-02 -5.9611929791310761e-02 -2.2281423069238762e-02 1.2378399746679335e-01 -9.8055971284341889e-02 -3.1066893522055071e-03 1.0000000000000000e+00 4.2105263157894735e-01 1.7728531855955676e-01 7.4646449919813368e-02 7.4646449919813368e-02 1.7231268692723721e+07 0.0000000000000000e+00 1.3104451100363762e-01 6.2108096427290121e-02 1.3336649928816086e-01 -2.7212216744210212e-02 5.7819393124266691e-02 1.8767895166508908e-01 -1.6582807449800971e-01 -8.0658471772627749e-02 1.0000000000000000e+00 4.3157894736842106e-01 1.8626038781163437e-01 8.0386062108179043e-02 8.0386062108179043e-02 1.8502441721045829e+07 0.0000000000000000e+00 5.0290624273417621e-02 -3.4143516918377435e-02 2.0343092871222798e-01 8.0209005576718964e-02 1.5865627466982682e-01 4.7863975319147309e-02 -2.9156023074697720e-02 -1.7757047085784922e-01 1.0000000000000000e+00 4.4210526315789472e-01 1.9545706371191135e-01 8.6412596588423957e-02 8.6412596588423957e-02 1.9792148664706565e+07 0.0000000000000000e+00 -4.4919793083297938e-02 8.3179072500111015e-03 1.5056887123723772e-01 1.8671873839475647e-01 7.9580241976890995e-02 -1.4641876300442142e-01 1.7582563285699679e-01 -1.0812787798155156e-01 1.0000000000000000e+00 4.5263157894736844e-01 2.0487534626038784e-01 9.2733051465228172e-02 9.2733051465228172e-02 2.1100520328300230e+07 0.0000000000000000e+00 -7.0949499692402607e-02 1.3297268406491647e-01 2.0040966997011227e-02 2.1815823789069552e-01 -1.7397132311881381e-02 -1.7486683921057181e-01 1.5545047857656219e-01 3.7153406706898165e-02 1.0000000000000000e+00 4.6315789473684210e-01 2.1451523545706372e-01 9.9354424843271616e-02 9.9354424843271616e-02 2.2427701408503219e+07 0.0000000000000000e+00 -4.8301096032815641e-02 2.0344971118314231e-01 -5.4206939079223103e-02 1.9836521390377485e-01 -4.0000822780333935e-02 -1.3915740100207880e-02 2.6921900309792385e-02 2.6006461089467747e-02 1.0000000000000000e+00 4.7368421052631576e-01 2.2437673130193903e-01 1.0628371482723427e-01 1.0628371482723427e-01 2.3773807935681403e+07 0.0000000000000000e+00 -1.6145165173066793e-02 2.0466276046512732e-01 -5.2829512466032617e-02 1.6409895881795919e-01 -4.2923789745114335e-02 3.7178459665998420e-02 -3.1904611180816757e-02 3.7443697316359881e-02 1.0000000000000000e+00 4.8421052631578948e-01 2.3445983379501387e-01 1.1352791952179618e-01 1.1352791952179618e-01 2.5138965494905464e+07 0.0000000000000000e+00 5.8989244727135799e-02 1.7651151390245995e-01 
-2.8347652737450959e-02 9.3532617906562601e-02 -1.3484599259347574e-01 1.4803601547544343e-02 -2.9281193259428608e-02 1.5018281815327478e-01 1.0000000000000000e+00 4.9473684210526314e-01 2.4476454293628808e-01 1.2109403703163725e-01 1.2109403703163725e-01 2.6523310263039686e+07 0.0000000000000000e+00 1.6701125537561815e-01 1.2264360977493392e-01 -1.0936350559824077e-02 2.1503874669492613e-02 -1.4445943103750508e-01 1.2068399478920640e-01 -1.4071704631755921e-02 3.8187054641061799e-02 1.0000000000000000e+00 5.0526315789473686e-01 2.5529085872576179e-01 1.2898906546143754e-01 1.2898906546143754e-01 2.7926978180642635e+07 0.0000000000000000e+00 2.0062717183728879e-01 3.4896284016351968e-02 -3.3408209786038308e-03 6.7124255099841462e-02 4.9571006936827623e-02 1.0702228757277181e-01 -4.8762384022196393e-03 -1.5270525758866149e-01 1.0000000000000000e+00 5.1578947368421058e-01 2.6603878116343493e-01 1.3722000291587699e-01 1.3722000291587699e-01 2.9350100892358869e+07 0.0000000000000000e+00 1.0815656790284345e-01 4.0121666418745344e-02 5.0906709943765328e-03 1.4641813632802181e-01 1.9765618695392373e-01 -1.1215730732557562e-01 -1.8901535823182544e-02 -6.6803587748738347e-02 1.0000000000000000e+00 5.2631578947368418e-01 2.7700831024930744e-01 1.4579384749963548e-01 1.4579384749963548e-01 3.0792791682040647e+07 0.0000000000000000e+00 -1.5213858126022602e-02 1.2834476825405688e-01 6.6503092049194315e-02 1.2105172162147879e-01 1.1096375530744837e-01 -1.3017365149456511e-01 -1.2469031978554224e-01 1.4475944982057376e-01 1.0000000000000000e+00 5.3684210526315790e-01 2.8819944598337949e-01 1.5471759731739321e-01 1.5471759731739321e-01 3.2255188890072003e+07 0.0000000000000000e+00 -5.9699154152928820e-02 1.8184846588040063e-01 1.6924530473158547e-01 8.8277728011633191e-03 -2.8066706727610594e-03 3.7492710931613937e-03 -1.4152794707912439e-01 1.4092526041973122e-01 1.0000000000000000e+00 5.4736842105263162e-01 2.9961218836565101e-01 1.6399825047383004e-01 1.6399825047383004e-01 3.3737424698033422e+07 0.0000000000000000e+00 -4.4896214899082541e-02 1.8297623209692498e-01 2.1883206296378127e-01 -5.7605190186745300e-02 -3.5013547420702930e-02 7.3088569433417683e-03 4.7698190340317581e-03 2.1946669962047711e-02 1.0000000000000000e+00 5.5789473684210522e-01 3.1124653739612185e-01 1.7364280507362584e-01 1.7364280507362584e-01 3.5239632326350734e+07 0.0000000000000000e+00 -2.1224053174835857e-02 1.5906034912211336e-01 2.0970028089474685e-01 -4.7749535198037242e-02 -2.4001065562456333e-02 3.0344329995368825e-02 4.4275793878134018e-02 -5.0825302254619301e-02 1.0000000000000000e+00 5.6842105263157894e-01 3.2310249307479222e-01 1.8365825922146084e-01 1.8365825922146084e-01 3.6761934819881216e+07 0.0000000000000000e+00 -7.5118610931840489e-03 9.1006358925856679e-02 1.7903777288316586e-01 3.8153453082868889e-02 -1.0151995927430326e-02 1.5196115402105520e-01 1.3025265679763930e-02 -1.5397518992547402e-01 1.0000000000000000e+00 5.7894736842105265e-01 3.3518005540166207e-01 1.9405161102201490e-01 1.9405161102201490e-01 3.8304448683193475e+07 0.0000000000000000e+00 -2.0665797005054313e-03 -1.6548982784842764e-02 1.6061288779989019e-01 1.5822506394567862e-01 -3.5443056460756225e-03 1.2158673178356291e-01 3.7591023000768276e-02 -1.5529353537724841e-01 1.0000000000000000e+00 5.8947368421052626e-01 3.4747922437673123e-01 2.0482985857996788e-01 2.0482985857996788e-01 3.9867328474246711e+07 0.0000000000000000e+00 1.7161937073402172e-02 -6.9653755348263402e-02 1.5399106008616245e-01 1.9780764816357715e-01 -4.6490858326503764e-02 
8.1234965378308831e-03 -8.1611523566562372e-03 4.5540312664047428e-02 1.0000000000000000e+00 5.9999999999999998e-01 3.5999999999999999e-01 2.1599999999999997e-01 2.1599999999999997e-01 4.1450727994291037e+07 0.0000000000000000e+00 1.0132240451120625e-01 -5.1934993648663420e-02 1.4298555988347192e-01 1.0741407089797238e-01 -1.5375841751499603e-01 -5.6572321965554090e-02 1.3631643973506719e-02 1.9649285156347063e-01 1.0000000000000000e+00 6.1052631578947369e-01 3.7274238227146816e-01 2.2756903338679108e-01 2.2756903338679108e-01 4.3054756450479880e+07 0.0000000000000000e+00 1.9304159120581105e-01 3.6983956204560908e-02 8.6041788334626473e-02 -1.5381611946291066e-02 -9.4132618541648852e-02 -1.5576017970246842e-01 1.4006821465228125e-01 1.1068381743975080e-01 1.0000000000000000e+00 6.2105263157894741e-01 3.8570637119113577e-01 2.3954395684502119e-01 2.3954395684502119e-01 4.4679530292131960e+07 0.0000000000000000e+00 1.8264946904303456e-01 1.5794822030965477e-01 1.9357360471847010e-02 -5.9732660564315754e-02 1.2399604912009474e-01 -1.5574704890232346e-01 3.4956141023667744e-02 -2.8652274804318320e-03 1.0000000000000000e+00 6.3157894736842102e-01 3.9889196675900274e-01 2.5193176847937016e-01 2.5193176847937016e-01 4.6325184495605588e+07 0.0000000000000000e+00 7.9855390960713771e-02 1.9775039775631173e-01 6.6603362128221322e-02 -4.4902260870368436e-02 1.4214467442543610e-01 4.5440611800936892e-02 -1.5354899403779046e-01 -3.5024493669864265e-02 1.0000000000000000e+00 6.4210526315789473e-01 4.1229916897506924e-01 2.6473946639451817e-01 2.6473946639451817e-01 4.7991846162983879e+07 0.0000000000000000e+00 6.1357502586188668e-02 1.1334719941118741e-01 1.4630748208767322e-01 -2.1225142441062165e-02 -8.8135053403643832e-02 1.7892450809972435e-01 -6.6992599968341504e-02 -2.4003098671311795e-02 1.0000000000000000e+00 6.5263157894736845e-01 4.2592797783933523e-01 2.7797404869514508e-01 2.7797404869514508e-01 4.9679642121459484e+07 0.0000000000000000e+00 1.3469063881523385e-01 5.1308279215545170e-02 1.2219866686111240e-01 -7.5118610931840489e-03 -1.1613732274772244e-01 -1.3692848932160245e-02 1.4084140145522778e-01 -1.0151995927430326e-02 1.0000000000000000e+00 6.6315789473684206e-01 4.3977839335180047e-01 2.9164251348593084e-01 2.9164251348593084e-01 5.1388713588894203e+07 0.0000000000000000e+00 1.4626007333230318e-01 1.0946590528481281e-01 4.6562990343610049e-02 -2.0665797005054313e-03 9.0905894555470657e-02 -1.4402181474363498e-01 5.7000139595247135e-02 -3.5443056460756225e-03 1.0000000000000000e+00 6.7368421052631577e-01 4.5385041551246535e-01 3.0575185887155559e-01 3.0575185887155559e-01 5.3119190209192812e+07 0.0000000000000000e+00 4.6783579108841580e-02 1.5625301801967639e-01 7.9108355772958230e-02 1.7161937073402172e-02 1.6909419215668495e-01 1.5403393490703639e-02 -1.3899492880216652e-01 -4.6490858326503764e-02 1.0000000000000000e+00 6.8421052631578949e-01 4.6814404432132967e-01 3.2030908295669924e-01 3.2030908295669924e-01 5.4871193086425975e+07 0.0000000000000000e+00 -4.5114165861211988e-02 8.7043168968356177e-02 1.5653563402563669e-01 1.0132240451120625e-01 5.8334984570757975e-02 1.7384413361107057e-01 -7.8626944610405286e-02 -1.5375841751499603e-01 1.0000000000000000e+00 6.9473684210526321e-01 4.8265927977839340e-01 3.3532118384604176e-01 3.3532118384604176e-01 5.6644850124556869e+07 0.0000000000000000e+00 -5.8562289370767968e-02 -2.2704687697958727e-02 1.8891110966162300e-01 1.9304159120581105e-01 -2.8025072642548346e-02 1.0084915180645485e-01 2.2167773225657120e-02 -9.4132618541648852e-02 
1.0000000000000000e+00 7.0526315789473681e-01 4.9739612188365645e-01 3.5079515964426300e-01 3.5079515964426300e-01 5.8440306078570858e+07 0.0000000000000000e+00 1.1154062814447488e-03 -6.1845668350574423e-02 1.7838676171569481e-01 1.8256588961365544e-01 -1.1784073775053558e-01 -6.0375842901551121e-03 -8.4518672459887964e-05 1.2430275447415774e-01 1.0000000000000000e+00 7.1578947368421053e-01 5.1235457063711909e-01 3.6673800845604315e-01 3.6673800845604315e-01 6.0257698738376126e+07 0.0000000000000000e+00 1.2126126777430234e-01 -4.5417107870702682e-02 1.6129075321051228e-01 6.2171976860766448e-02 -1.7974985002261207e-01 -3.5857283869831884e-02 2.6828234871076329e-02 1.8779069754008593e-01 1.0000000000000000e+00 7.2631578947368425e-01 5.2753462603878121e-01 3.8315672838606218e-01 3.8315672838606218e-01 6.2097159447418898e+07 0.0000000000000000e+00 2.0517859877481684e-01 -2.1334707415184437e-02 1.5601870644972088e-01 -4.0075556165366157e-02 -5.2471386249378776e-02 -2.4190077782059498e-02 1.1020868196116446e-02 6.5434351891749037e-02 1.0000000000000000e+00 7.3684210526315785e-01 5.4293628808864258e-01 4.0005831753899979e-01 4.0005831753899979e-01 6.3958803792432591e+07 0.0000000000000000e+00 2.1608926552076596e-01 -7.5328926144319734e-03 1.4933335804331696e-01 -5.7204007150943592e-02 4.9663976708091320e-02 -1.0189388353867119e-02 -1.2692601934889872e-02 -2.5922752571419572e-02 1.0000000000000000e+00 7.4736842105263157e-01 5.5855955678670355e-01 4.1744977401953637e-01 4.1744977401953637e-01 6.5842753707203761e+07 0.0000000000000000e+00 1.8896268921011594e-01 -2.1538040621234878e-03 1.1198410285113368e-01 1.4294012610944554e-03 1.3840377788283981e-02 -3.2442869666311747e-03 1.0706580368252554e-01 -1.1732198074317117e-01 1.0000000000000000e+00 7.5789473684210529e-01 5.7440443213296399e-01 4.3533809593235173e-01 4.3533809593235173e-01 6.7749140587336496e+07 0.0000000000000000e+00 1.4689090968190335e-01 -5.2188135981721137e-04 3.1612713445013561e-02 1.2132514820777866e-01 7.7250795524835256e-02 -8.4664289216444095e-04 1.0224575003366272e-01 -1.7963810414761522e-01 1.0000000000000000e+00 7.6842105263157889e-01 5.9047091412742370e-01 4.5373028138212557e-01 4.5373028138212557e-01 6.9678108008850276e+07 0.0000000000000000e+00 5.5428105217004565e-02 5.8331679992476050e-03 3.9279208899906853e-02 1.9924655952782810e-01 1.6592346585094558e-01 -1.7738200432729435e-02 -1.1349049968501190e-01 -3.4901009676777048e-02 1.0000000000000000e+00 7.7894736842105261e-01 6.0675900277008310e-01 4.7263332847353839e-01 4.7263332847353839e-01 7.1629787035486892e+07 0.0000000000000000e+00 -4.2374488810218575e-02 6.6670845869462786e-02 1.2698999238053343e-01 1.4939937435892975e-01 7.7835621655044193e-02 -1.2441038191784466e-01 -1.2660664896928714e-01 1.7404064308000236e-01 1.0000000000000000e+00 7.8947368421052633e-01 6.2326869806094187e-01 4.9205423531126991e-01 4.9205423531126991e-01 7.3604299627690956e+07 0.0000000000000000e+00 -3.2893308102531944e-02 1.6919523171359327e-01 1.4407276285879275e-01 1.9847702790366510e-02 -1.0079071180019505e-01 -1.4116268491739059e-01 8.7603050781168701e-02 1.5469025969742412e-01 1.0000000000000000e+00 8.0000000000000004e-01 6.4000000000000012e-01 5.1200000000000012e-01 5.1200000000000012e-01 7.5601781557119071e+07 0.0000000000000000e+00 8.8477498470214216e-02 2.0115487455677278e-01 4.6255472056085531e-02 -3.6580955108194153e-02 -2.0082847814410132e-01 5.0426079480257409e-02 1.6823731193294467e-01 -1.8823114750382463e-02 1.0000000000000000e+00 8.1052631578947365e-01 6.5695290858725752e-01 
5.3247762064440873e-01 5.3247762064440873e-01 7.7622359293640539e+07 0.0000000000000000e+00 1.8220796480361842e-01 1.1421213364901453e-01 -4.5225909367786872e-02 4.8592852559141052e-02 -5.3155055528298592e-02 1.8029804406925609e-01 5.8143939242299353e-02 -1.8549317172682961e-01 1.0000000000000000e+00 8.2105263157894737e-01 6.7412742382271473e-01 5.5349409534917626e-01 5.5349409534917626e-01 7.9666160208791256e+07 0.0000000000000000e+00 1.4422498690493521e-01 5.1499050785936029e-02 -5.9751297652897432e-02 1.6471298376073357e-01 1.6179298099065725e-01 -1.3371841757522693e-02 -2.4181809130075956e-02 -1.2338009625514385e-01 1.0000000000000000e+00 8.3157894736842108e-01 6.9152354570637120e-01 5.7505642221898245e-01 5.7505642221898245e-01 8.1733321542241797e+07 0.0000000000000000e+00 5.5629670341812348e-02 1.0950336742514234e-01 -3.6627101125479865e-02 1.7171645261874577e-01 6.8294444134684051e-02 -1.4395598224091452e-01 -3.3928990275288611e-02 1.0993044214252626e-01 1.0000000000000000e+00 8.4210526315789469e-01 7.0914127423822704e-01 5.9717159935850694e-01 5.9717159935850694e-01 8.3823983214631885e+07 0.0000000000000000e+00 8.1985713908860280e-02 1.5625982776754024e-01 -1.5453626016904153e-02 7.6514974315381998e-02 -1.3486343490355079e-01 1.5414729585004479e-02 -1.8809739866262339e-02 1.3727024370352697e-01 1.0000000000000000e+00 8.5263157894736841e-01 7.2698060941828258e-01 6.1984662487243036e-01 6.1984662487243036e-01 8.5938269240893766e+07 0.0000000000000000e+00 1.5134500251729271e-01 9.2988080474178669e-02 -5.0503926884532951e-03 6.0504351340969015e-02 -5.9895265756205913e-02 1.5629697850679977e-01 -7.1205556804665180e-03 -8.9487401013700105e-02 1.0000000000000000e+00 8.6315789473684212e-01 7.4504155124653748e-01 6.4308849686543235e-01 6.4308849686543235e-01 8.8076292616174176e+07 0.0000000000000000e+00 1.2238897232005523e-01 4.3987189692751984e-02 -1.9229168781730597e-04 1.3450185347371746e-01 1.4682437746526575e-01 -2.3523837684953017e-02 -5.9866528905411446e-03 -1.1645465304185681e-01 1.0000000000000000e+00 8.7368421052631584e-01 7.6332409972299176e-01 6.6690421344219286e-01 6.6690421344219286e-01 9.0238174707291275e+07 0.0000000000000000e+00 9.1381228485740811e-03 1.0735320829525780e-01 3.7424556698332306e-02 1.4630650141805643e-01 1.4143733075247705e-01 -1.4719358253292714e-01 -8.4437780177661043e-02 9.0533945719118311e-02 1.0000000000000000e+00 8.8421052631578945e-01 7.8182825484764540e-01 6.9130077270739165e-01 6.9130077270739165e-01 9.2424069093082637e+07 0.0000000000000000e+00 -5.7541893808193988e-02 1.5573835074099510e-01 1.3665006985949926e-01 6.4460363182578001e-02 2.2057317074319575e-02 1.4569894373150526e-02 -1.6105153695890059e-01 1.2343612403014882e-01 1.0000000000000000e+00 8.9473684210526316e-01 8.0055401662049863e-01 7.1628517276570935e-01 7.1628517276570935e-01 9.4634114440171003e+07 0.0000000000000000e+00 -5.3681574445025963e-02 9.2877426233830093e-02 2.0427338623106647e-01 5.6317803624116536e-02 -3.3254925682017572e-02 1.5610796628719659e-01 -2.7822830715261444e-02 -9.5236453833490359e-02 1.0000000000000000e+00 9.0526315789473688e-01 8.1950138504155134e-01 7.4186441172182538e-01 7.4186441172182538e-01 9.6868439125350088e+07 0.0000000000000000e+00 -2.8536438078967347e-02 4.3966158171504059e-02 1.5192364711076117e-01 1.3333235659540948e-01 -2.9598523553562982e-02 -2.3561230111389816e-02 1.7225863033171879e-01 -1.1823964281885121e-01 1.0000000000000000e+00 9.1578947368421049e-01 8.3867036011080320e-01 7.6804548768041980e-01 7.6804548768041980e-01 9.9127162507476807e+07 
0.0000000000000000e+00 -1.0889922474070838e-02 1.0734956336301885e-01 5.7816670018619114e-02 1.4594607835265347e-01 -1.4443653468108263e-02 -1.4720026920754570e-01 7.1596698888554883e-02 9.0387137548106261e-02 1.0000000000000000e+00 9.2631578947368420e-01 8.5806094182825488e-01 7.9483539874617293e-01 7.9483539874617293e-01 1.0141041149911594e+08 0.0000000000000000e+00 1.4335783373479641e-02 1.5573758696441714e-01 8.2513820961616330e-02 4.6719698675365254e-02 -5.0533597611170288e-02 1.4569504528011028e-02 -1.3400655467981051e-01 1.6898244628168810e-01 1.0000000000000000e+00 9.3684210526315792e-01 8.7767313019390591e-01 8.2224114302376450e-01 8.2224114302376450e-01 1.0371832633873191e+08 0.0000000000000000e+00 1.0057881824010886e-01 8.6933603994233902e-02 1.5145674602386761e-01 -3.9182126614223267e-02 -1.5492378601430459e-01 1.7365715450032285e-01 -5.9704220427747284e-02 4.0764607998156246e-02 1.0000000000000000e+00 9.4736842105263153e-01 8.9750692520775610e-01 8.5026971861787415e-01 8.5026971861787415e-01 1.0605103777870619e+08 0.0000000000000000e+00 1.9404181414642413e-01 -2.2725719219206651e-02 1.2241000384130316e-01 6.9596250301867321e-03 -9.8293212348255610e-02 1.0081175938001805e-01 1.4686176989170255e-01 -1.4852108307555020e-01 1.0000000000000000e+00 9.5789473684210524e-01 9.1756232686980610e-01 8.7892812363318262e-01 8.7892812363318262e-01 1.0840866954560232e+08 0.0000000000000000e+00 2.2027124567695419e-01 -6.1849313282813366e-02 9.1417677808130239e-03 1.3257510965588765e-01 4.0325763516621441e-02 -6.0442709647736733e-03 1.4144401742709561e-01 -1.7507889086387318e-01 1.0000000000000000e+00 9.6842105263157896e-01 9.3783933518005547e-01 9.0822335617436944e-01 9.0822335617436944e-01 1.1079135118806735e+08 0.0000000000000000e+00 1.9888006090410909e-01 -4.5417691925627689e-02 -5.7541309753268981e-02 1.8570223692806570e-01 2.6839251289435345e-02 -3.5858382632556876e-02 2.2058415837044570e-02 3.1619246057030564e-02 1.0000000000000000e+00 9.7894736842105268e-01 9.5833795013850420e-01 9.3816241434611469e-01 9.3816241434611469e-01 1.1319921057647294e+08 0.0000000000000000e+00 1.6420852379208145e-01 -2.1334707415184437e-02 -5.3681574445025963e-02 1.0321791872096504e-01 3.7630676427107587e-02 -2.4190077782059498e-02 -3.3254925682017572e-02 1.9072667660191583e-01 1.0000000000000000e+00 9.8947368421052628e-01 9.7905817174515231e-01 9.6875229625309800e-01 9.6875229625309800e-01 1.1563238092136064e+08 0.0000000000000000e+00 9.2385672666928986e-02 -7.5328926144319734e-03 -2.8536438078967347e-02 -1.6553095053473525e-02 1.5410086651862076e-01 -1.0189388353867119e-02 -2.9598523553562982e-02 1.0889515078225322e-01 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.0000000000000000e+00 1.1809099716453132e+08 """} # convert to arrays for convenience for key, val in fmristat.items(): fmristat[key] = np.fromstring(val, sep='\t').reshape(N_ROWS,-1).T # time vector time_vector = np.arange(N_ROWS)*2.5+1.25 nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/__init__.py000066400000000000000000000000001470056100100236270ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/test_FIAC.py000066400000000000000000000250161470056100100236470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This test ensures that the design matrices of the FIAC dataset match with fMRIstat's, at least on one block and one event trial. Taylor, J.E. & Worsley, K.J. (2005). 
\'Inference for magnitudes and delays of responses in the FIAC data
using BRAINSTAT/FMRISTAT\'. Human Brain Mapping, 27, 434-441
"""

import numpy as np

from nipy.algorithms.statistics.formula import formulae
from nipy.algorithms.statistics.models.regression import OLSModel

# testing imports
from nipy.testing import assert_almost_equal

from ... import design, hrf, utils
from .. import hrf as delay

# Local imports
from .FIACdesigns import N_ROWS, altdescr, descriptions, fmristat, time_vector

t = formulae.make_recarray(time_vector, 't')


def _to_str(s):
    return s.decode('latin1') if isinstance(s, bytes) else str(s)


def protocol(recarr, design_type, *hrfs):
    """ Create an object that can evaluate the FIAC

    Subclass of formulae.Formula, but not necessary.

    Parameters
    ----------
    recarr : (N,) structured array
        with fields 'time' and 'event'
    design_type : str
        one of ['event', 'block'].  Handles how the 'begin' term is
        handled.  For 'block', the first event of each block is put in
        this group.  For the 'event', only the first event is put in
        this group.  The 'begin' events are convolved with hrf.glover.
    hrfs: symbolic HRFs
        Each event type ('SSt_SSp','SSt_DSp','DSt_SSp','DSt_DSp') is
        convolved with each of these HRFs in order.

    Returns
    -------
    f: Formula
        Formula for constructing design matrices.
    contrasts : dict
        Dictionary of the contrasts of the experiment.
    """
    event_types = np.unique(recarr['event'])
    N = recarr.size
    if design_type == 'block':
        keep = np.not_equal((np.arange(N)) % 6, 0)
    else:
        keep = np.greater(np.arange(N), 0)
    # This first frame was used to model out a potentially
    # 'bad' first frame....
    _begin = recarr['time'][~keep]

    termdict = {}
    termdict['begin'] = utils.define('begin', utils.events(_begin, f=hrf.glover))
    drift = formulae.natural_spline(utils.T,
                                    knots=[N_ROWS/2.+1.25],
                                    intercept=True)
    for i, t in enumerate(drift.terms):
        termdict['drift%d' % i] = t
    # After removing the first frame, keep the remaining
    # events and times
    times = recarr['time'][keep]
    events = recarr['event'][keep]

    # Now, specify the experimental conditions.  This creates expressions
    # named SSt_SSp0, SSt_SSp1, etc. with one expression for each
    # (eventtype, hrf) pair
    for v in event_types:
        k = np.array([events[i] == v for i in range(times.shape[0])])
        for l, h in enumerate(hrfs):
            # Make sure event type is a string (not byte string)
            term_name = '%s%d' % (_to_str(v), l)
            termdict[term_name] = utils.define(term_name,
                                               utils.events(times[k], f=h))
    f = formulae.Formula(list(termdict.values()))

    Tcontrasts = {}
    Tcontrasts['average'] = (termdict['SSt_SSp0'] + termdict['SSt_DSp0'] +
                             termdict['DSt_SSp0'] + termdict['DSt_DSp0']) / 4.
    Tcontrasts['speaker'] = (termdict['SSt_DSp0'] - termdict['SSt_SSp0'] +
                             termdict['DSt_DSp0'] - termdict['DSt_SSp0']) * 0.5
    Tcontrasts['sentence'] = (termdict['DSt_DSp0'] + termdict['DSt_SSp0'] -
                              termdict['SSt_DSp0'] - termdict['SSt_SSp0']) * 0.5
    Tcontrasts['interaction'] = (termdict['SSt_SSp0'] - termdict['SSt_DSp0'] -
                                 termdict['DSt_SSp0'] + termdict['DSt_DSp0'])
    # Ftest
    Fcontrasts = {}
    Fcontrasts['overall1'] = formulae.Formula(list(Tcontrasts.values()))

    return f, Tcontrasts, Fcontrasts
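
# --- Editorial sketch, not part of the original test module ---
# A minimal illustration of how the formula returned by ``protocol`` above
# is turned into a numerical design matrix, assuming the FIAC block
# description and the spectral delay HRFs imported at the top of this
# module.  ``_example_block_design`` is a hypothetical helper name added
# only for illustration.
def _example_block_design():
    # Build the block-design formula and its T contrasts
    f, tcons, fcons = protocol(descriptions['block'], 'block', *delay.spectral)
    # Evaluating the formula on the scan-time recarray gives the design
    # matrix (one row per scan, one column per term) and, because contrasts
    # were passed, the matching contrast matrices
    X, cmatrices = f.design(t, contrasts=tcons)
    return X, cmatrices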
def altprotocol(d, design_type, *hrfs):
    """ Create an object that can evaluate the FIAC.

    Subclass of formulae.Formula, but not necessary.

    Parameters
    ----------
    d : np.recarray
        recarray defining design in terms of time, sentence, speaker
    design_type : str in ['event', 'block']
        Handles how the 'begin' term is handled.  For 'block', the first
        event of each block is put in this group.  For the 'event', only
        the first event is put in this group.  The 'begin' events are
        convolved with hrf.glover.
    hrfs: symbolic HRFs
        Each event type ('SSt_SSp','SSt_DSp','DSt_SSp','DSt_DSp') is
        convolved with each of these HRFs in order.
    """
    if design_type == 'block':
        keep = np.not_equal((np.arange(d.time.shape[0])) % 6, 0)
    else:
        keep = np.greater(np.arange(d.time.shape[0]), 0)
    # This first frame was used to model out a potentially
    # 'bad' first frame....
    _begin = d.time[~keep]
    d = d[keep]

    termdict = {}
    termdict['begin'] = utils.define('begin', utils.events(_begin, f=hrf.glover))
    drift = formulae.natural_spline(utils.T,
                                    knots=[N_ROWS/2.+1.25],
                                    intercept=True)
    for i, t in enumerate(drift.terms):
        termdict['drift%d' % i] = t

    # Now, specify the experimental conditions
    # The elements of termdict are DiracDeltas, rather than HRFs
    st = formulae.Factor('sentence', ['DSt', 'SSt'])
    sp = formulae.Factor('speaker', ['DSp', 'SSp'])

    indic = {}
    indic['sentence'] = st.main_effect
    indic['speaker'] = sp.main_effect
    indic['interaction'] = st.main_effect * sp.main_effect
    indic['average'] = formulae.I

    for key in indic:
        # The matrix signs will be populated with +- 1's
        # d is the recarray having fields ('time', 'sentence', 'speaker')
        signs = indic[key].design(d, return_float=True)
        for l, h in enumerate(hrfs):
            # symb is a sympy expression representing a sum
            # of [h(t-_t) for _t in d.time]
            symb = utils.events(d.time, amplitudes=signs, f=h)
            # the values of termdict will have keys like
            # 'average0', 'speaker1'
            # and values that are sympy expressions like average0(t),
            # speaker1(t)
            termdict['%s%d' % (key, l)] = utils.define("%s%d" % (key, l), symb)

    f = formulae.Formula(list(termdict.values()))

    Tcontrasts = {}
    Tcontrasts['average'] = termdict['average0']
    Tcontrasts['speaker'] = termdict['speaker0']
    Tcontrasts['sentence'] = termdict['sentence0']
    Tcontrasts['interaction'] = termdict['interaction0']

    # F tests
    Fcontrasts = {}
    Fcontrasts['overall1'] = formulae.Formula(list(Tcontrasts.values()))
    nhrf = len(hrfs)
    Fcontrasts['averageF'] = formulae.Formula(
        [termdict['average%d' % j] for j in range(nhrf)])
    Fcontrasts['speakerF'] = formulae.Formula(
        [termdict['speaker%d' % j] for j in range(nhrf)])
    Fcontrasts['sentenceF'] = formulae.Formula(
        [termdict['sentence%d' % j] for j in range(nhrf)])
    Fcontrasts['interactionF'] = formulae.Formula(
        [termdict['interaction%d' % j] for j in range(nhrf)])
    Fcontrasts['overall2'] = (Fcontrasts['averageF'] + Fcontrasts['speakerF'] +
                              Fcontrasts['sentenceF'] + Fcontrasts['interactionF'])

    return f, Tcontrasts, Fcontrasts


def create_protocols():
    # block and event protocols
    block, bTcons, bFcons = protocol(descriptions['block'], 'block',
                                     *delay.spectral)
    event, eTcons, eFcons = protocol(descriptions['event'], 'event',
                                     *delay.spectral)
    # Now create the design matrices and contrasts
    # The 0 indicates that it will be these columns
    # convolved with the first HRF
    X = {}
    c = {}
    D = {}
    for f, cons, design_type in [(block, bTcons, 'block'),
                                 (event, eTcons, 'event')]:
        X[design_type], c[design_type] = f.design(t, contrasts=cons)
        D[design_type] = f.design(t, return_float=False)
    return X, c, D
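
# --- Editorial sketch, not part of the original test module ---
# ``test_altprotocol`` below verifies that the ``protocol`` and
# ``altprotocol`` formulas span the same column space: it regresses one
# design onto the other with OLS and checks that the residual is zero.  A
# stand-alone version of that check, under hypothetical names, might look
# like this:
def _example_same_span(formula_a, formula_b):
    # Evaluate both formulas on the common scan-time frame
    X = formula_a.design(t, return_float=True)
    Y = formula_b.design(t, return_float=True)
    if X.ndim == 1:
        X = X.reshape((X.shape[0], 1))
    # If every column of Y lies in the column space of X, projecting Y
    # onto X leaves a relative residual of zero
    resid = OLSModel(X).fit(Y).resid
    return (resid ** 2).sum() / (Y ** 2).sum()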
def test_altprotocol():
    block, bT, bF = protocol(descriptions['block'], 'block', *delay.spectral)
    event, eT, eF = protocol(descriptions['event'], 'event', *delay.spectral)
    blocka, baT, baF = altprotocol(altdescr['block'], 'block', *delay.spectral)
    eventa, eaT, eaF = altprotocol(altdescr['event'], 'event', *delay.spectral)

    for c in bT:
        baf = baT[c]
        if not isinstance(baf, formulae.Formula):
            baf = formulae.Formula([baf])
        bf = bT[c]
        if not isinstance(bf, formulae.Formula):
            bf = formulae.Formula([bf])
        X = baf.design(t, return_float=True)
        Y = bf.design(t, return_float=True)
        if X.ndim == 1:
            X.shape = (X.shape[0], 1)
        m = OLSModel(X)
        r = m.fit(Y)
        remaining = (r.resid**2).sum() / (Y**2).sum()
        assert_almost_equal(remaining, 0)

    for c in bF:
        baf = baF[c]
        if not isinstance(baf, formulae.Formula):
            baf = formulae.Formula([baf])
        bf = bF[c]
        if not isinstance(bf, formulae.Formula):
            bf = formulae.Formula([bf])
        X = baf.design(t, return_float=True)
        Y = bf.design(t, return_float=True)
        if X.ndim == 1:
            X.shape = (X.shape[0], 1)
        m = OLSModel(X)
        r = m.fit(Y)
        remaining = (r.resid**2).sum() / (Y**2).sum()
        assert_almost_equal(remaining, 0)


def matchcol(col, X):
    """ Find the row in X with the highest correlation with 1D col.

    Used to find matching columns in fMRIstat's design with the design
    created by Protocol.  Not meant as a generic helper function.
    """
    c = np.array([np.corrcoef(col, X[i])[0,1] for i in range(X.shape[0])])
    c = np.nan_to_num(c)
    ind = np.argmax(np.abs(c))
    return ind, c[ind]


def test_agreement():
    # The test: does Protocol manage to recreate the design of fMRIstat?
    X, c, D = create_protocols()
    for design_type in ['event', 'block']:
        dd = D[design_type]
        for i in range(X[design_type].shape[1]):
            _, cmax = matchcol(X[design_type][:,i], fmristat[design_type])
            if not dd.dtype.names[i].startswith('ns'):
                assert np.greater(np.abs(cmax), 0.999)


# @dec.slow
def test_event_design():
    block = altdescr['block']
    event = altdescr['event']
    t = time_vector

    bkeep = np.not_equal((np.arange(block.time.shape[0])) % 6, 0)
    ekeep = np.greater(np.arange(event.time.shape[0]), 0)

    # Even though there is a FIAC block experiment
    # the design is represented as an event design
    # with the same event repeated several times in a row...
    Xblock, cblock = design.event_design(block[bkeep], t, hrfs=delay.spectral)
    Xevent, cevent = design.event_design(event[ekeep], t, hrfs=delay.spectral)
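
# --- Editorial sketch, not part of the original test module ---
# ``matchcol`` above can also be used directly to see how a single
# regressor lines up with fMRIstat's stored design;
# ``_example_match_first_column`` is a hypothetical helper added only for
# illustration.  Note that the converted ``fmristat`` arrays hold
# regressors as rows, which is the orientation ``matchcol`` expects.
def _example_match_first_column():
    X, c, D = create_protocols()
    ind, cmax = matchcol(X['block'][:, 0], fmristat['block'])
    # ind indexes the best-matching fMRIstat regressor; cmax is the
    # (signed) correlation with it
    return ind, cmax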
import hrf from ...utils import T, lambdify_t from ..hrf import spectral_decomposition def test_spectral_decomposition(): # mainly to test that the second sign follows the first spectral, approx = spectral_decomposition(hrf.glover) val_makers = [lambdify_t(def_func(T)) for def_func in spectral] t = np.linspace(-15,50,3251) vals = [val_maker(t) for val_maker in val_makers] ind = np.argmax(vals[1]) assert vals[0][ind] > 0 # test that we can get several components spectral, approx = spectral_decomposition(hrf.glover, ncomp=5) assert len(spectral) == 5 nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/test_iterables.py000066400000000000000000000051361470056100100251200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.random import standard_normal as noise from numpy.testing import assert_array_almost_equal from nipy.algorithms.statistics.models.regression import OLSModel from nipy.core.image.image import rollimg from nipy.core.utils.generators import f_generator, parcels, write_data from nipy.io.api import load_image from nipy.modalities.fmri.api import FmriImageList, axis0_generator from nipy.testing import funcfile # Module globals FIMG = load_image(funcfile) # Put time on first axis FIMG = rollimg(FIMG, 't') FDATA = FIMG.get_fdata() FIL = FmriImageList.from_image(FIMG) # I think it makes more sense to use FDATA instead of FIL for GLM # purposes -- reduces some noticeable overhead in creating the # array from FmriImageList # create a design matrix, model and contrast matrix DESIGN = noise((FDATA.shape[0],3)) MODEL = OLSModel(DESIGN) CMATRIX = np.array([[1,0,0],[0,1,0]]) # two prototypical functions in a GLM analysis def fit(input): return MODEL.fit(input).resid def contrast(results): return results.Fcontrast(CMATRIX) # generators def result_generator(datag): for i, fdata in datag: yield i, MODEL.fit(fdata) def flatten_generator(ing): for i, r in ing: r = r.reshape((r.shape[0], -1)) yield i, r def unflatten_generator(ing): for i, r in ing: r = r.reshape(FIMG.shape[2:]) yield i, r def contrast_generator(resultg): for i, r in resultg: yield i, np.asarray(contrast(r)) def test_iterate_over_image(): # Fit a model, iterating over the slices of an array # associated to an FmriImage. 
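# (Added explanatory note, hedged): the pipeline below chains simple
# generators -- axis0_generator yields indexed chunks of the data,
# flatten_generator reshapes each chunk to 2D for OLSModel.fit,
# result_generator fits the model, contrast_generator reduces each fit
# to an F contrast, and write_data scatters the values back into `c`.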
c = np.zeros(FDATA.shape[1:]) + 0.5 res_gen = result_generator(flatten_generator(axis0_generator(FDATA))) write_data(c, unflatten_generator(contrast_generator(res_gen))) # Fit a model, iterating over the array associated to an # FmriImage, iterating over a list of ROIs defined by binary # regions of the same shape as a frame of FmriImage # this might really be an anatomical image or AR(1) coefficients a = np.asarray(FDATA[0]) p = np.greater(a, a.mean()) d = np.ones(FDATA.shape[1:]) * 2.0 flat_gen = flatten_generator(axis0_generator(FDATA, parcels(p))) write_data(d, contrast_generator(result_generator(flat_gen))) assert_array_almost_equal(d, c) e = np.zeros(FDATA.shape[1:]) + 3.0 flat_gen2 = flatten_generator(axis0_generator(FDATA, parcels(p))) write_data(e, f_generator(contrast, result_generator(flat_gen2))) assert_array_almost_equal(d, e) nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/test_model.py000066400000000000000000000120171470056100100242420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from nipy.algorithms.statistics.formula.formulae import Formula, Term, make_recarray from nipy.algorithms.statistics.models.regression import ( OLSModel, ar_bias_correct, ) from nipy.core.image.image import rollimg from nipy.io.api import load_image from nipy.testing import anatfile, funcfile from ...api import FmriImageList from .. import model from ..model import ModelOutputImage, estimateAR FUNC_IMG = load_image(funcfile) FUNC_LIST = FmriImageList.from_image(FUNC_IMG, volume_start_times=2.) def test_model_out_img(in_tmp_path): # Model output image cmap = load_image(anatfile).coordmap shape = (2,3,4) fname = 'myfile.nii' moi = ModelOutputImage(fname, cmap, shape) for i in range(shape[0]): moi[i] = i for i in range(shape[0]): assert_array_equal(moi[i], i) moi.save() pytest.raises(ValueError, moi.__setitem__, 0, 1) pytest.raises(ValueError, moi.__getitem__, 0) new_img = load_image(fname) for i in range(shape[0]): assert_array_equal(new_img[i].get_fdata(), i) del new_img def example_formula(): time_vector = make_recarray(FUNC_LIST.volume_start_times, 't') t = Term('t') con_defs = {'c': t, 'c2': t+t**2} # Formula - with an intercept f = Formula([t, t**2, t**3, 1]) # Design matrix and contrasts desmtx, cmatrices = f.design(time_vector, contrasts=con_defs) return f, desmtx, cmatrices def test_model_inputs(): f, _, cmatrices = example_formula() start_times = FUNC_LIST.volume_start_times for MC, kwargs in ((model.OLS, {}), (model.AR1, {'rho': FUNC_LIST[0]})): # This works correctly m = MC(FUNC_LIST, f, **kwargs) assert np.all(m.volume_start_times == start_times) # Need volume_start_times for image. pytest.raises(ValueError, MC, FUNC_IMG, f, **kwargs) # With timevector. 
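# (Added note, hedged): an FmriImageList carries its own
# volume_start_times, so the list input above needs no extra argument,
# whereas a plain image must be given volume_start_times explicitly --
# hence the ValueError check above and the keyword argument below.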
m = MC(FUNC_IMG, f, outputs=[], volume_start_times=start_times, **kwargs) assert np.all(m.volume_start_times == start_times) @pytest.mark.parametrize("imp_img, kwargs", ((FUNC_LIST, {}), (rollimg(FUNC_IMG, 't'), {'volume_start_times': FUNC_LIST.volume_start_times}))) def test_run(in_tmp_path, imp_img, kwargs): ar1_fname = 'ar1_out.nii' f, _, cmatrices = example_formula() # Run OLS model outputs = [] outputs.append(model.output_AR1(ar1_fname, imp_img)) outputs.append(model.output_resid('resid_OLS_out.nii', imp_img)) ols = model.OLS(imp_img, f, outputs, **kwargs) ols.execute() # Run AR1 model outputs = [] outputs.append( model.output_T('T_out.nii', cmatrices['c'], imp_img)) outputs.append( model.output_F('F_out.nii', cmatrices['c2'], imp_img)) outputs.append( model.output_resid('resid_AR_out.nii', imp_img)) rho = load_image(ar1_fname) ar = model.AR1(imp_img, f, rho, outputs, **kwargs) ar.execute() f_img = load_image('F_out.nii') assert f_img.shape == FUNC_IMG.shape[:-1] f_data = f_img.get_fdata() assert np.all((f_data>=0) & (f_data<30)) resid_img = load_image('resid_AR_out.nii') assert resid_img.shape == FUNC_IMG.shape assert_array_almost_equal(np.mean(resid_img.get_fdata()), 0, 3) e_img = load_image('T_out_effect.nii') sd_img = load_image('T_out_sd.nii') t_img = load_image('T_out_t.nii') t_data = t_img.get_fdata() assert_array_almost_equal(t_data, e_img.get_fdata() / sd_img.get_fdata()) assert np.all(np.abs(t_data) < 6) # Need to delete to help windows delete temporary files del rho, resid_img, f_img, e_img, sd_img, t_img, f_data, t_data def test_ar_modeling(): # Compare against standard routines rng = np.random.RandomState(20110903) N = 10 Y = rng.normal(size=(N,1)) * 10 + 100 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] my_model = OLSModel(X) results = my_model.fit(Y) # fmristat wrapper rhos = estimateAR(results.resid, my_model.design, order=2) assert rhos.shape == (2,) assert np.all(np.abs(rhos <= 1)) # standard routine rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) # Make 2D and 3D Y Y = rng.normal(size=(N,4)) * 10 + 100 results = my_model.fit(Y) rhos = estimateAR(results.resid, my_model.design, order=2) assert rhos.shape == (2,4) assert np.all(np.abs(rhos <= 1)) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) # 3D results.resid = np.reshape(results.resid, (N,2,2)) rhos = estimateAR(results.resid, my_model.design, order=2) assert rhos.shape == (2,2,2) assert np.all(np.abs(rhos <= 1)) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) nipy-0.6.1/nipy/modalities/fmri/fmristat/tests/test_outputters.py000066400000000000000000000035231470056100100254020ustar00rootroot00000000000000""" Tests for regression module """ import numpy as np from numpy.testing import assert_array_almost_equal from nipy.algorithms.statistics.api import OLSModel from ..outputters import output_F, output_T N = 10 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] RNG = np.random.RandomState(20110901) Y = RNG.normal(size=(10,1)) * 10 + 100 MODEL = OLSModel(X) RESULTS = MODEL.fit(Y) C1 = [1, 0] def test_model(): # Check basics about the model fit # Check we fit the mean assert_array_almost_equal(RESULTS.theta[1], np.mean(Y)) def test_output_T(): # Check we get required outputs res = RESULTS.Tcontrast(C1) # all return values # default is all return values assert_array_almost_equal([res.effect, res.sd, res.t], output_T(RESULTS, C1)) assert_array_almost_equal([res.effect, res.sd, res.t], output_T(RESULTS, C1, ('effect', 'sd', 't'))) # Input 
order determines return order assert_array_almost_equal([res.t, res.effect, res.sd], output_T(RESULTS, C1, ('t', 'effect', 'sd'))) # And can select inputs assert_array_almost_equal([res.t], output_T(RESULTS, C1, ('t',))) assert_array_almost_equal([res.sd], output_T(RESULTS, C1, ('sd',))) assert_array_almost_equal([res.effect], output_T(RESULTS, C1, ('effect',))) def test_output_F(): # Test output_F convenience function rng = np.random.RandomState(ord('F')) Y = rng.normal(size=(10,1)) * 10 + 100 X = np.c_[rng.normal(size=(10,3)), np.ones((N,))] c1 = np.zeros((X.shape[1],)) c1[0] = 1 model = OLSModel(X) results = model.fit(Y) # Check we get required outputs exp_f = results.t(0) **2 assert_array_almost_equal(exp_f, output_F(results, c1)) nipy-0.6.1/nipy/modalities/fmri/glm.py000066400000000000000000000553731470056100100176770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module presents an interface to use the glm implemented in nipy.algorithms.statistics.models.regression. It contains the GLM and contrast classes that are meant to be the main objects of fMRI data analyses. It is important to note that the GLM is meant as a one-session General Linear Model. But inference can be performed on multiple sessions by computing fixed effects on contrasts. Examples -------- >>> import numpy as np >>> from nipy.modalities.fmri.glm import GeneralLinearModel >>> n, p, q = 100, 80, 10 >>> X, Y = np.random.randn(p, q), np.random.randn(p, n) >>> cval = np.hstack((1, np.zeros(9))) >>> model = GeneralLinearModel(X) >>> model.fit(Y) >>> z_vals = model.contrast(cval).z_score() # z-transformed statistics Example of fixed effects statistics across two contrasts >>> cval_ = cval.copy() >>> np.random.shuffle(cval_) >>> z_ffx = (model.contrast(cval) + model.contrast(cval_)).z_score() """ from warnings import warn import numpy as np import scipy.stats as sps from nibabel import Nifti1Image, load from nipy.algorithms.statistics.models.regression import ARModel, OLSModel from nipy.algorithms.statistics.utils import multiple_mahalanobis, z_score from nipy.io.nibcompat import get_affine, get_header from nipy.labs.mask import compute_mask_sessions DEF_TINY = 1e-50 DEF_DOFMAX = 1e10 def data_scaling(Y): """Scaling of the data to have percent of baseline change columnwise Parameters ---------- Y: array of shape(n_time_points, n_voxels) the input data Returns ------- Y: array of shape (n_time_points, n_voxels), the data after mean-scaling, de-meaning and multiplication by 100 mean : array of shape (n_voxels,) the data mean """ mean = Y.mean(0) Y = 100 * (Y / mean - 1) return Y, mean class GeneralLinearModel: """ This class handles the so-called General Linear Model. Most of what it does happens in the fit() and contrast() methods: fit() performs the standard two-step ('ols' then 'ar1') GLM fitting, and contrast() returns a contrast instance, yielding statistics and p-values. The link between fit() and contrast() is done via the two class members: glm_results : dictionary of nipy.algorithms.statistics.models.
regression.RegressionResults instances, describing results of a GLM fit labels : array of shape(n_voxels), labels that associate each voxel with a results key """ def __init__(self, X): """ Parameters ---------- X : array of shape (n_time_points, n_regressors) the design matrix """ self.X = X self.labels_ = None self.results_ = None def fit(self, Y, model='ols', steps=100): """GLM fitting of a dataset using 'ols' regression or the two-pass 'ar1' procedure Parameters ---------- Y : array of shape(n_time_points, n_samples) the fMRI data model : {'ar1', 'ols'}, optional the temporal variance model. Defaults to 'ols' steps : int, optional Maximum number of discrete steps for the AR(1) coef histogram """ if model not in ['ar1', 'ols']: raise ValueError('Unknown model') if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[0] != self.X.shape[0]: raise ValueError('Response and predictors are inconsistent') # fit the OLS model ols_result = OLSModel(self.X).fit(Y) # compute and discretize the AR1 coefs ar1 = ((ols_result.resid[1:] * ols_result.resid[:-1]).sum(0) / (ols_result.resid ** 2).sum(0)) ar1 = (ar1 * steps).astype(np.int_) * 1. / steps # Fit the AR model according to current AR(1) estimates if model == 'ar1': self.results_ = {} self.labels_ = ar1 # fit the model for val in np.unique(self.labels_): m = ARModel(self.X, val) self.results_[val] = m.fit(Y[:, self.labels_ == val]) else: self.labels_ = np.zeros(Y.shape[1]) self.results_ = {0.0: ols_result} def get_beta(self, column_index=None): """Accessor for the best linear unbiased estimate of model parameters Parameters ---------- column_index: int or array-like of int or None, optional The indices of the columns to be returned. If None (default behaviour), the whole vector is returned Returns ------- beta: array of shape (n_voxels, n_columns) the beta """ # make column_index a list if it is an int if column_index is None: column_index = np.arange(self.X.shape[1]) if not hasattr(column_index, '__iter__'): column_index = [int(column_index)] n_beta = len(column_index) # build the beta array beta = np.zeros((n_beta, self.labels_.size), dtype=np.float64) for l in self.results_: beta[:, self.labels_ == l] = self.results_[l].theta[column_index] return beta def get_mse(self): """Accessor for the mean squared error of the model Returns ------- mse: array of shape (n_voxels) the mean squared error per voxel """ # build the mse array mse = np.zeros(self.labels_.size, dtype=np.float64) for l in self.results_: mse[self.labels_ == l] = self.results_[l].MSE return mse def get_logL(self): """Accessor for the log-likelihood of the model Returns ------- logL: array of shape (n_voxels,) the log-likelihood per voxel """ # build the logL array logL = np.zeros(self.labels_.size, dtype=np.float64) for l in self.results_: logL[self.labels_ == l] = self.results_[l].logL return logL def contrast(self, con_val, contrast_type=None): """ Specify and estimate a linear contrast Parameters ---------- con_val : numpy.ndarray of shape (p) or (q, p) where q = number of contrast vectors and p = number of regressors contrast_type : {None, 't', 'F' or 'tmin-conjunction'}, optional type of the contrast.
If None, then defaults to 't' for 1D `con_val` and 'F' for 2D `con_val` Returns ------- con: Contrast instance """ if self.labels_ is None or self.results_ is None: raise ValueError('The model has not been estimated yet') con_val = np.asarray(con_val) if con_val.ndim == 1: dim = 1 else: dim = con_val.shape[0] if contrast_type is None: if dim == 1: contrast_type = 't' else: contrast_type = 'F' if contrast_type not in ['t', 'F', 'tmin-conjunction']: raise ValueError(f'Unknown contrast type: {contrast_type}') effect_ = np.zeros((dim, self.labels_.size), dtype=np.float64) var_ = np.zeros((dim, dim, self.labels_.size), dtype=np.float64) if contrast_type == 't': for l in self.results_: resl = self.results_[l].Tcontrast(con_val) effect_[:, self.labels_ == l] = resl.effect.T var_[:, :, self.labels_ == l] = (resl.sd ** 2).T else: for l in self.results_: resl = self.results_[l].Fcontrast(con_val) effect_[:, self.labels_ == l] = resl.effect var_[:, :, self.labels_ == l] = resl.covariance dof_ = self.results_[l].df_resid return Contrast(effect=effect_, variance=var_, dof=dof_, contrast_type=contrast_type) class Contrast: """ The contrast class handles the estimation of statistical contrasts on a given model: student (t), Fisher (F), conjunction (tmin-conjunction). The important feature is that it supports addition, thus opening the possibility of fixed-effects models. The current implementation is meant to be simple, and could be enhanced in the future on the computational side (high-dimensional F contrasts may lead to memory breakage). Notes ----- The 'tmin-conjunction' test is the valid conjunction test discussed in: Nichols T, Brett M, Andersson J, Wager T, Poline JB. Valid conjunction inference with the minimum statistic. Neuroimage. 2005 Apr 15;25(3):653-60. This test gives the p-value of the z-values under the conjunction null, i.e. the union of the null hypotheses for all terms. 
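Examples
--------
A hedged illustrative sketch (not part of the original class
documentation): a one-dimensional t contrast over five voxels with unit
variance has a t statistic equal to its effect.

>>> import numpy as np
>>> from nipy.modalities.fmri.glm import Contrast
>>> effect = np.array([[1., 2., 3., 4., 5.]])
>>> variance = np.ones((1, 1, 5))
>>> np.allclose(Contrast(effect, variance, dof=10).stat(), effect[0])
True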
""" def __init__(self, effect, variance, dof=DEF_DOFMAX, contrast_type='t', tiny=DEF_TINY, dofmax=DEF_DOFMAX): """ Parameters ========== effect: array of shape (contrast_dim, n_voxels) the effects related to the contrast variance: array of shape (contrast_dim, contrast_dim, n_voxels) the associated variance estimate dof: scalar, the degrees of freedom contrast_type: string to be chosen among 't' and 'F' """ if variance.ndim != 3: raise ValueError('Variance array should have 3 dimensions') if effect.ndim != 2: raise ValueError('Variance array should have 2 dimensions') if variance.shape[0] != variance.shape[1]: raise ValueError('Inconsistent shape for the variance estimate') if ((variance.shape[1] != effect.shape[0]) or (variance.shape[2] != effect.shape[1])): raise ValueError('Effect and variance have inconsistent shape') self.effect = effect self.variance = variance self.dof = float(dof) self.dim = effect.shape[0] if self.dim > 1 and contrast_type == 't': print('Automatically converted multi-dimensional t to F contrast') contrast_type = 'F' self.contrast_type = contrast_type self.stat_ = None self.p_value_ = None self.baseline = 0 self.tiny = tiny self.dofmax = dofmax def stat(self, baseline=0.0): """ Return the decision statistic associated with the test of the null hypothesis: (H0) 'contrast equals baseline' Parameters ========== baseline: float, optional, Baseline value for the test statistic """ self.baseline = baseline # Case: one-dimensional contrast ==> t or t**2 if self.dim == 1: # avoids division by zero stat = (self.effect - baseline) / np.sqrt( np.maximum(self.variance, self.tiny)) if self.contrast_type == 'F': stat = stat ** 2 # Case: F contrast elif self.contrast_type == 'F': # F = |t|^2/q , |t|^2 = e^t inv(v) e if self.effect.ndim == 1: self.effect = self.effect[np.newaxis] if self.variance.ndim == 1: self.variance = self.variance[np.newaxis, np.newaxis] stat = (multiple_mahalanobis(self.effect - baseline, self.variance) / self.dim) # Case: tmin (conjunctions) elif self.contrast_type == 'tmin-conjunction': vdiag = self.variance.reshape([self.dim ** 2] + list( self.variance.shape[2:]))[:: self.dim + 1] stat = (self.effect - baseline) / np.sqrt( np.maximum(vdiag, self.tiny)) stat = stat.min(0) # Unknown stat else: raise ValueError('Unknown statistic type') self.stat_ = stat return stat.ravel() def p_value(self, baseline=0.0): """Return a parametric estimate of the p-value associated with the null hypothesis: (H0) 'contrast equals baseline' Parameters ---------- baseline: float, optional Baseline value for the test statistic Notes ----- The value of 0.5 is used where the stat is not defined """ if self.stat_ is None or self.baseline != baseline: self.stat_ = self.stat(baseline) # Valid conjunction as in Nichols et al, Neuroimage 25, 2005. 
if self.contrast_type in ['t', 'tmin-conjunction']: p = sps.t.sf(self.stat_, np.minimum(self.dof, self.dofmax)) elif self.contrast_type == 'F': p = sps.f.sf(self.stat_, self.dim, np.minimum( self.dof, self.dofmax)) else: raise ValueError('Unknown statistic type') p[np.isnan(self.stat_)] = .5 self.p_value_ = p return p def z_score(self, baseline=0.0): """Return a parametric estimation of the z-score associated with the null hypothesis: (H0) 'contrast equals baseline' Parameters ---------- baseline: float, optional Baseline value for the test statistic Notes ----- The value of 0 is used where the stat is not defined """ if self.p_value_ is None or self.baseline != baseline: self.p_value_ = self.p_value(baseline) # Avoid inf values kindly supplied by scipy. self.z_score_ = z_score(self.p_value_) self.z_score_[np.isnan(self.stat_)] = 0 return self.z_score_ def __add__(self, other): """Addition of self with other. Yields a new Contrast instance. This should be used only on independent contrasts""" if self.contrast_type != other.contrast_type: raise ValueError( 'The two contrasts do not have consistent type dimensions') if self.dim != other.dim: raise ValueError( 'The two contrasts do not have compatible dimensions') effect_ = self.effect + other.effect variance_ = self.variance + other.variance dof_ = self.dof + other.dof return Contrast(effect=effect_, variance=variance_, dof=dof_, contrast_type=self.contrast_type) def __rmul__(self, scalar): """Multiplication of the contrast by a scalar""" scalar = float(scalar) effect_ = self.effect * scalar variance_ = self.variance * scalar ** 2 dof_ = self.dof return Contrast(effect=effect_, variance=variance_, dof=dof_, contrast_type=self.contrast_type) __mul__ = __rmul__ def __truediv__(self, scalar): return self.__rmul__(1 / float(scalar)) class FMRILinearModel: """ This class is meant to handle GLMs from a higher-level perspective i.e. by taking images as input and output """ def __init__(self, fmri_data, design_matrices, mask='compute', m=0.2, M=0.9, threshold=.5): """Load the data Parameters ---------- fmri_data : Image or str or sequence of Images / str fmri images / paths of the (4D) fmri images design_matrices : arrays or str or sequence of arrays / str design matrix arrays / paths of .npz files mask : str or Image or None, optional string can be 'compute' or a path to an image image is an input (assumed binary) mask image(s), if 'compute', the mask is computed if None, no masking will be applied m, M, threshold: float, optional parameters of the masking procedure. Should be within [0, 1] Notes ----- The only computation done here is mask computation (if required) Examples -------- We need the example data package for this example:: from nipy.utils import example_data from nipy.modalities.fmri.glm import FMRILinearModel fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') multi_session_model = FMRILinearModel(fmri_files, design_files, mask) multi_session_model.fit() z_image, = multi_session_model.contrast([np.eye(13)[1]] * 2) # The number of voxels with p < 0.001 given by ...
print(np.sum(z_image.get_fdata() > 3.09)) """ # manipulate the arguments if isinstance(fmri_data, str) or hasattr(fmri_data, 'get_fdata'): fmri_data = [fmri_data] if isinstance(design_matrices, (str, np.ndarray)): design_matrices = [design_matrices] if len(fmri_data) != len(design_matrices): raise ValueError('Incompatible number of fmri runs and ' 'design matrices were provided') self.fmri_data, self.design_matrices = [], [] self.glms, self.means = [], [] # load the fmri data for fmri_run in fmri_data: if isinstance(fmri_run, str): self.fmri_data.append(load(fmri_run)) else: self.fmri_data.append(fmri_run) # set self.affine as the affine of the first image self.affine = get_affine(self.fmri_data[0]) # load the designs for design_matrix in design_matrices: if isinstance(design_matrix, str): loaded = np.load(design_matrix) self.design_matrices.append(loaded[loaded.files[0]]) else: self.design_matrices.append(design_matrix) # load the mask if mask == 'compute': mask = compute_mask_sessions( fmri_data, m=m, M=M, cc=1, threshold=threshold, opening=0) self.mask = Nifti1Image(mask.astype(np.int8), self.affine) elif mask is None: mask = np.ones(self.fmri_data[0].shape[:3]).astype(np.int8) self.mask = Nifti1Image(mask, self.affine) else: if isinstance(mask, str): self.mask = load(mask) else: self.mask = mask def fit(self, do_scaling=True, model='ar1', steps=100): """ Load the data, mask the data, scale the data, fit the GLM Parameters ---------- do_scaling : bool, optional if True, the data should be scaled as percent of voxel mean model : string, optional, the kind of glm ('ols' or 'ar1') you want to fit to the data steps : int, optional in case of an ar1, discretization of the ar1 parameter """ from nibabel import Nifti1Image # get the mask as an array mask = self.mask.get_fdata().astype(np.bool_) self.glms, self.means = [], [] for fmri, design_matrix in zip(self.fmri_data, self.design_matrices): if do_scaling: # scale the data data, mean = data_scaling(fmri.get_fdata()[mask].T) else: data, mean = (fmri.get_fdata()[mask].T, fmri.get_fdata()[mask].T.mean(0)) mean_data = mask.astype(np.int16) mean_data[mask] = mean self.means.append(Nifti1Image(mean_data, self.affine)) # fit the GLM glm = GeneralLinearModel(design_matrix) glm.fit(data, model, steps) self.glms.append(glm) def contrast(self, contrasts, con_id='', contrast_type=None, output_z=True, output_stat=False, output_effects=False, output_variance=False): """ Estimation of a contrast as fixed effects on all sessions Parameters ---------- contrasts : array or list of arrays of shape (n_col) or (n_dim, n_col) where ``n_col`` is the number of columns of the design matrix, numerical definition of the contrast (one array per run) con_id : str, optional name of the contrast contrast_type : {'t', 'F', 'tmin-conjunction'}, optional type of the contrast output_z : bool, optional Return or not the corresponding z-stat image output_stat : bool, optional Return or not the base (t/F) stat image output_effects : bool, optional Return or not the corresponding effect image output_variance : bool, optional Return or not the corresponding variance image Returns ------- output_images : list of nibabel images The required output images, in the following order: z image, stat(t/F) image, effects image, variance image """ if self.glms == []: raise ValueError('first run fit() to estimate the model') if isinstance(contrasts, np.ndarray): contrasts = [contrasts] if len(contrasts) != len(self.glms): raise ValueError( f'contrasts must be a sequence of {len(self.glms)} 
session contrasts') contrast_ = None for i, (glm, con) in enumerate(zip(self.glms, contrasts)): if np.all(con == 0): warn('Contrast for session %d is null' % i) elif contrast_ is None: contrast_ = glm.contrast(con, contrast_type) else: contrast_ = contrast_ + glm.contrast(con, contrast_type) if output_z or output_stat: # compute the contrast and stat contrast_.z_score() # Prepare the returned images mask = self.mask.get_fdata().astype(np.bool_) do_outputs = [output_z, output_stat, output_effects, output_variance] estimates = ['z_score_', 'stat_', 'effect', 'variance'] descrips = ['z statistic', 'Statistical value', 'Estimated effect', 'Estimated variance'] dims = [1, 1, contrast_.dim, contrast_.dim ** 2] n_vox = mask.sum() output_images = [] for (do_output, estimate, descrip, dim) in zip( do_outputs, estimates, descrips, dims): if do_output: if dim > 1: result_map = np.tile( mask.astype(np.float64)[:, :, :, np.newaxis], dim) result_map[mask] = np.reshape( getattr(contrast_, estimate).T, (n_vox, dim)) else: result_map = mask.astype(np.float64) result_map[mask] = np.squeeze( getattr(contrast_, estimate)) output = Nifti1Image(result_map, self.affine) get_header(output)['descrip'] = ( f'{descrip} associated with contrast {con_id}') output_images.append(output) return output_images nipy-0.6.1/nipy/modalities/fmri/hemodynamic_models.py000066400000000000000000000320441470056100100227520ustar00rootroot00000000000000""" This module is for canonical hrf specification. Here we provide the SPM and Glover HRFs, and finite impulse response (FIR) models. This module closely follows the SPM implementation Author: Bertrand Thirion, 2011--2013 """ import warnings import numpy as np from scipy.stats import gamma def _gamma_difference_hrf(tr, oversampling=16, time_length=32., onset=0., delay=6, undershoot=16., dispersion=1., u_dispersion=1., ratio=0.167): """ Compute an hrf as the difference of two gamma functions Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the hrf Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ dt = tr / oversampling time_stamps = np.linspace(0, time_length, int(float(time_length) / dt)) time_stamps -= onset / dt hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) - \ ratio * gamma.pdf( time_stamps, undershoot / u_dispersion, dt / u_dispersion) hrf /= hrf.sum() return hrf def spm_hrf(tr, oversampling=16, time_length=32., onset=0.): """ Implementation of the SPM hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ return _gamma_difference_hrf(tr, oversampling, time_length, onset) def glover_hrf(tr, oversampling=16, time_length=32., onset=0.): """ Implementation of the Glover hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ return _gamma_difference_hrf(tr, oversampling, time_length, onset, delay=6, undershoot=12.,
dispersion=.9, u_dispersion=.9, ratio=.35) def spm_time_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the SPM time derivative hrf (dhrf) model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr, float), dhrf sampling on the provided grid """ do = .1 dhrf = 1. / do * (spm_hrf(tr, oversampling, time_length, onset + do) - spm_hrf(tr, oversampling, time_length, onset)) return dhrf def glover_time_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the Glover time derivative hrf (dhrf) model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr, float), dhrf sampling on the provided grid """ do = .1 dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset + do) - glover_hrf(tr, oversampling, time_length, onset)) return dhrf def spm_dispersion_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the SPM dispersion derivative hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr * oversampling, float), dhrf sampling on the oversampled time grid """ dd = .01 dhrf = 1. / dd * (_gamma_difference_hrf(tr, oversampling, time_length, onset, dispersion=1. + dd) - spm_hrf(tr, oversampling, time_length, onset)) return dhrf def _sample_condition(exp_condition, frametimes, oversampling=16, min_onset=-24): """Make a possibly oversampled event regressor from condition information. Parameters ---------- exp_condition: 3 x n_events arraylike (onsets, durations, amplitudes) of events for this condition frametimes: array of shape(n) timepoints corresponding to sampled data oversampling: int, optional factor for oversampling event regressor min_onset: float, optional minimal onset relative to frametimes[0] (in seconds) events that start before frametimes[0] + min_onset are not considered Returns ------- regressor: array of shape(n) possibly oversampled event regressor hr_frametimes : array of shape(n) frametimes corresponding to regressor """ # Find the high-resolution frametimes n = frametimes.size min_onset = float(min_onset) n_hr = ((n - 1) * 1. / (frametimes.max() - frametimes.min()) * (frametimes.max() * (1 + 1. / (n - 1)) - frametimes.min() - min_onset) * oversampling) + 1 hr_frametimes = np.linspace(frametimes.min() + min_onset, frametimes.max() * (1 + 1.
/ (n - 1)), int(n_hr)) # Get the condition information onsets, durations, values = tuple(map(np.asanyarray, exp_condition)) if (onsets < frametimes[0] + min_onset).any(): warnings.warn('Some stimulus onsets are earlier than %d in the experiment and are thus not considered in the model' % (frametimes[0] + min_onset), UserWarning) # Set up the regressor timecourse tmax = len(hr_frametimes) regressor = np.zeros_like(hr_frametimes).astype(np.float64) t_onset = np.minimum(np.searchsorted(hr_frametimes, onsets), tmax - 1) regressor[t_onset] += values t_offset = np.minimum(np.searchsorted(hr_frametimes, onsets + durations), tmax - 1) # Handle the case where duration is 0 by offsetting at t + 1 for i, to in enumerate(t_offset): if to < (tmax - 1) and to == t_onset[i]: t_offset[i] += 1 regressor[t_offset] -= values regressor = np.cumsum(regressor) return regressor, hr_frametimes def _resample_regressor(hr_regressor, hr_frametimes, frametimes, kind='linear'): """ This function samples the regressors at frametimes Parameters ---------- hr_regressor: array of shape(n), the regressor time course sampled at high temporal resolution hr_frametimes: array of shape(n), the corresponding time stamps frametimes: array of shape(p), the desired time stamps kind: string, optional, the kind of desired interpolation Returns ------- regressor: array of shape(p), the resampled regressor """ from scipy.interpolate import interp1d f = interp1d(hr_frametimes, hr_regressor) return f(frametimes).T def _orthogonalize(X): """ Orthogonalize every column of design `X` w.r.t preceding columns Parameters ---------- X: array of shape(n, p), the data to be orthogonalized Returns ------- X: after orthogonalization Notes ----- X is changed in place. The columns are not normalized """ if X.size == X.shape[0]: return X from numpy.linalg import pinv for i in range(1, X.shape[1]): X[:, i] -= np.dot(X[:, i], np.dot(X[:, :i], pinv(X[:, :i]))) return X def _regressor_names(con_name, hrf_model, fir_delays=None): """ returns a list of regressor names, computed from con-name and hrf type Parameters ---------- con_name: string, identifier of the condition hrf_model: string, identifier of the hrf model Returns ------- names: a list of strings yielding the regressor names """ if hrf_model == 'canonical': return [con_name] elif hrf_model == "canonical with derivative": return [con_name, con_name + "_derivative"] elif hrf_model == 'spm': return [con_name] elif hrf_model == 'spm_time': return [con_name, con_name + "_derivative"] elif hrf_model == 'spm_time_dispersion': return [con_name, con_name + "_derivative", con_name + "_dispersion"] elif hrf_model == 'fir': return [con_name + "_delay_%d" % i for i in fir_delays] def _hrf_kernel(hrf_model, tr, oversampling=16, fir_delays=None): """ Given the specification of the hemodynamic model and time parameters, return the list of matching kernels Parameters ---------- hrf_model: string, identifier of the hrf model tr: the repetition time in seconds oversampling: int, temporal oversampling factor to have a smooth hrf fir_delays: list of fir delays Returns ------- hkernel: a list of hrf kernels, depending on the specified hrf model """ if hrf_model == 'spm': hkernel = [spm_hrf(tr, oversampling)] elif hrf_model == 'spm_time': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling)] elif hrf_model == 'spm_time_dispersion': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling), spm_dispersion_derivative(tr, oversampling)] elif hrf_model == 'canonical': hkernel
= [glover_hrf(tr, oversampling)] elif hrf_model == 'canonical with derivative': hkernel = [glover_hrf(tr, oversampling), glover_time_derivative(tr, oversampling)] elif hrf_model == 'fir': hkernel = [np.hstack((np.zeros(f * oversampling), np.ones(oversampling))) for f in fir_delays] else: raise ValueError('Unknown hrf model') return hkernel def compute_regressor(exp_condition, hrf_model, frametimes, con_id='cond', oversampling=16, fir_delays=None, min_onset=-24): """ This is the main function to convolve regressors with hrf model Parameters ---------- exp_condition: descriptor of an experimental condition hrf_model: string, the hrf model to be used. Can be chosen among: 'spm', 'spm_time', 'spm_time_dispersion', 'canonical', 'canonical with derivative', 'fir' frametimes: array of shape (n): the time points at which the regressors are sampled con_id: string, optional identifier of the condition oversampling: int, optional, oversampling factor to perform the convolution fir_delays: array-like of int, onsets corresponding to the fir basis min_onset: float, optional minimal onset relative to frametimes[0] (in seconds) events that start before frametimes[0] + min_onset are not considered Returns ------- creg: array of shape(n_scans, n_reg): computed regressors sampled at frametimes reg_names: list of strings, corresponding regressor names Notes ----- The different hemodynamic models can be understood as follows: 'spm': this is the hrf model used in spm 'spm_time': this is the spm model plus its time derivative (2 regressors) 'spm_time_dispersion': idem, plus dispersion derivative (3 regressors) 'canonical': this one corresponds to the Glover hrf 'canonical with derivative': the Glover hrf + time derivative (2 regressors) 'fir': finite impulse response basis, a set of delayed dirac models with arbitrary length. This one currently assumes regularly spaced frametimes (i.e. fixed time of repetition). It is expected that spm standard and Glover model would not yield large differences in most cases. """ # this is the average tr in this session, not necessarily the true tr tr = float(frametimes.max()) / (np.size(frametimes) - 1) # 1. create the high temporal resolution regressor hr_regressor, hr_frametimes = _sample_condition( exp_condition, frametimes, oversampling, min_onset) # 2. create the hrf model(s) hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays) # 3. convolve the regressor and hrf, and downsample the regressor conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size] for h in hkernel]) # 4. temporally resample the regressors creg = _resample_regressor(conv_reg, hr_frametimes, frametimes) # 5. orthogonalize the regressors if hrf_model != 'fir': creg = _orthogonalize(creg) # 6. generate regressor names reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays) return creg, reg_names nipy-0.6.1/nipy/modalities/fmri/hrf.py000066400000000000000000000174661470056100100176720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides definitions of various hemodynamic response functions (hrf). In particular, it provides Gary Glover's canonical HRF, AFNI's default HRF, and a spectral HRF.
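For instance (a hedged, added illustration -- the bounds are deliberately
loose): the numerical Glover HRF defined below peaks a little after 5
seconds:

>>> import numpy as np
>>> t = np.linspace(0, 25, 2501)
>>> bool(4.0 < t[np.argmax(glovert(t))] < 6.0)
True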
The Glover HRF is based on: @article{glover1999deconvolution, title={{Deconvolution of impulse response in event-related BOLD fMRI}}, author={Glover, G.H.}, journal={NeuroImage}, volume={9}, number={4}, pages={416--429}, year={1999}, publisher={Orlando, FL: Academic Press, c1992-} } This parametrization is from fmristat: http://www.math.mcgill.ca/keith/fmristat/ fmristat models the HRF as the difference of two gamma functions, ``g1`` and ``g2``, each defined by the timing of the gamma function peaks (``pk1, pk2``) and the FWHMs (``width1, width2``): raw_hrf = g1(pk1, width1) - a2 * g2(pk2, width2) where ``a2`` is the scale factor for the ``g2`` gamma function. The actual hrf is the raw hrf set to have an integral of 1. fmristat used ``pk1, width1, pk2, width2, a2 = (5.4 5.2 10.8 7.35 0.35)``. These are parameters to match Glover's 1 second duration auditory stimulus curves. Glover wrote these as: y(t) = c1 * t**n1 * exp(t/t1) - a2 * c2 * t**n2 * exp(t/t2) with ``n1, t1, n2, t2, a2 = (6.0, 0.9, 12, 0.9, 0.35)``, and ``c1, c2`` being ``1/max(t**n1 * exp(t/t1)), 1/max(t**n2 * exp(t/t2)``. The difference between Glover's expression and ours is because we (and fmristat) use the peak location and width to characterize the function rather than ``n1, t1``. The values we use are equivalent. Specifically, in our formulation: >>> n1, t1, c1 = gamma_params(5.4, 5.2) >>> np.allclose((n1-1, t1), (6.0, 0.9), rtol=0.02) True >>> n2, t2, c2 = gamma_params(10.8, 7.35) >>> np.allclose((n2-1, t2), (12.0, 0.9), rtol=0.02) True """ from functools import partial import numpy as np import scipy.stats as sps import sympy from sympy.utilities.lambdify import implemented_function from .utils import T, lambdify_t def gamma_params(peak_location, peak_fwhm): """ Parameters for gamma density given peak and width TODO: where does the coef come from again.... check fmristat code From a peak location and peak FWHM, determine the parameters (shape, scale) of a Gamma density: f(x) = coef * x**(shape-1) * exp(-x/scale) The coefficient returned ensures that the f has integral 1 over [0,np.inf] Parameters ---------- peak_location : float Location of the peak of the Gamma density peak_fwhm : float FWHM at the peak Returns ------- shape : float Shape parameter in the Gamma density scale : float Scale parameter in the Gamma density coef : float Coefficient needed to ensure the density has integral 1. 
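Examples
--------
A hedged, added check: for shape ``a`` and scale ``s`` the mode of the
Gamma density is ``(a - 1) * s``, so the returned parameters place the
peak at `peak_location`:

>>> shape, scale, coef = gamma_params(5.4, 5.2)
>>> np.allclose((shape - 1) * scale, 5.4)
True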
""" shape_m1 = np.power(peak_location / peak_fwhm, 2) * 8 * np.log(2.0) scale = np.power(peak_fwhm, 2) / peak_location / 8 / np.log(2.0) coef = peak_location**(-shape_m1) * np.exp(peak_location / scale) return shape_m1 + 1, scale, coef def gamma_expr(peak_location, peak_fwhm): shape, scale, coef = gamma_params(peak_location, peak_fwhm) return ( coef * sympy.Piecewise((T + 1e-14, T >= 0), (0, True))**(shape-1) * sympy.exp(-(T+1.0e-14)/scale) ) def _get_sym_int(f, dt=0.02, t=50): # numerical integral of symbolic function return _get_num_int(lambdify_t(f), dt, t) def _get_num_int(lf, dt=0.02, t=50): # numerical integral of numerical function tt = np.arange(dt,t+dt,dt) return lf(tt).sum() * dt # Glover HRF _gexpr = gamma_expr(5.4, 5.2) - 0.35 * gamma_expr(10.8, 7.35) _gexpr = _gexpr / _get_sym_int(_gexpr) # The numerical function (pass times to get values) glovert = lambdify_t(_gexpr) # The symbolic function glover = implemented_function('glover', glovert) # Derivative of Glover HRF _dgexpr = _gexpr.diff(T) _dpos = sympy.Derivative((T >= 0), T) _dgexpr = _dgexpr.subs(_dpos, 0) _dgexpr = _dgexpr / _get_sym_int(sympy.Abs(_dgexpr)) # Numerical function dglovert = lambdify_t(_dgexpr) # Symbolic function dglover = implemented_function('dglover', dglovert) del(_gexpr); del(_dpos); del(_dgexpr) # AFNI's HRF _aexpr = sympy.Piecewise((T, T >= 0), (0, True))**8.6 * sympy.exp(-T/0.547) _aexpr = _aexpr / _get_sym_int(_aexpr) # Numerical function afnit = lambdify_t(_aexpr) # Symbolic function afni = implemented_function('afni', afnit) del(_aexpr) # SPMs HRF def spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio = 6, normalize=True, ): """ SPM HRF function from sum of two gamma PDFs This function is designed to be partially compatible with SPMs `spm_hrf.m` function. The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`). Parameters ---------- t : array-like vector of times at which to sample HRF. peak_delay : float, optional delay of peak. under_delay : float, optional delay of undershoot. peak_disp : float, optional width (dispersion) of peak. under_disp : float, optional width (dispersion) of undershoot. p_u_ratio : float, optional peak to undershoot ratio. Undershoot divided by this value before subtracting from peak. normalize : {True, False}, optional If True, divide HRF values by their sum before returning. SPM does this by default. Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t`. Notes ----- See ``spm_hrf.m`` in the SPM distribution. 
""" if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if v <= 0]): raise ValueError("delays and dispersions must be > 0") # gamma.pdf only defined for t > 0 hrf = np.zeros(t.shape, dtype=np.float64) pos_t = t[t > 0] peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale = peak_disp) undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0, scale = under_disp) hrf[t > 0] = peak - undershoot / p_u_ratio if not normalize: return hrf return hrf / np.sum(hrf) _spm_can_int = _get_num_int(partial(spm_hrf_compat, normalize=False)) def spmt(t): """ SPM canonical HRF, HRF values for time values `t` This is the canonical HRF function as used in SPM """ return spm_hrf_compat(t, normalize=False) / _spm_can_int def dspmt(t): """ SPM canonical HRF derivative, HRF derivative values for time values `t` This is the canonical HRF derivative function as used in SPM. It is the numerical difference of the HRF sampled at time `t` minus the values sampled at time `t` -1 """ t = np.asarray(t) return spmt(t) - spmt(t - 1) _spm_dd_func = partial(spm_hrf_compat, normalize=False, peak_disp=1.01) _spm_dd_func_int = _get_num_int(_spm_dd_func) def ddspmt(t): """ SPM canonical HRF dispersion derivative, values for time values `t` This is the canonical HRF dispersion derivative function as used in SPM. It is the numerical difference between the HRF sampled at time `t`, and values at `t` for another HRF shape with a small change in the peak dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`). """ return (spmt(t) - _spm_dd_func(t) / _spm_dd_func_int) / 0.01 spm = implemented_function('spm', spmt) dspm = implemented_function('dspm', dspmt) ddspm = implemented_function('ddspm', ddspmt) nipy-0.6.1/nipy/modalities/fmri/realfuncs.py000066400000000000000000000052241470056100100210740ustar00rootroot00000000000000""" Helper functions for constructing design regressors """ import numpy as np def dct_ii_basis(volume_times, order=None, normcols=False): """ DCT II basis up to order `order` See: https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II By default, basis not normalized to length 1, and therefore, basis is not orthogonal. Normalize basis with `normcols` keyword argument. Parameters ---------- volume_times : array-like Times of acquisition of each volume. Must be regular and continuous otherwise we raise an error. order : None or int, optional Order of DCT-II basis. If None, return full basis set. normcols : bool, optional If True, normalize columns to length 1, so return orthogonal `dct_basis`. Returns ------- dct_basis : array Shape ``(len(volume_times), order)`` array with DCT-II basis up to order `order`. Raises ------ ValueError If difference between successive `volume_times` values is not constant over the 1D array. """ N = len(volume_times) if order is None: order = N if not np.allclose(np.diff(np.diff(volume_times)), 0): raise ValueError("DCT basis assumes continuous regular sampling") n = np.arange(N) cycle = np.pi * (n + 0.5) / N dct_basis = np.zeros((N, order)) for k in range(order): dct_basis[:, k] = np.cos(cycle * k) if normcols: # Set column lengths to 1 lengths = np.ones(order) * np.sqrt(N / 2.) lengths[0:1] = np.sqrt(N) # Allow order=0 dct_basis /= lengths return dct_basis def dct_ii_cut_basis(volume_times, cut_period): """DCT-II regressors with periods >= `cut_period` See: http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II Parameters ---------- volume_times : array-like Times of acquisition of each volume. 
Must be regular and continuous otherwise we raise an error. cut_period: float Cut period (wavelength) of the low-pass filter (in time units). Returns ------- cdrift: array shape (n_scans, n_drifts) DCT-II drifts plus a constant regressor in the final column. Constant regressor always present, regardless of `cut_period`. """ N = len(volume_times) hfcut = 1./ cut_period dt = volume_times[1] - volume_times[0] # Such that hfcut = 1/(2*dt) yields N order = int(np.floor(2 * N * hfcut * dt)) # Always return constant column if order == 0: return np.ones((N, 1)) basis = np.ones((N, order)) basis[:, :-1] = dct_ii_basis(volume_times, order, normcols=True)[:, 1:] return basis nipy-0.6.1/nipy/modalities/fmri/spm/000077500000000000000000000000001470056100100173345ustar00rootroot00000000000000nipy-0.6.1/nipy/modalities/fmri/spm/__init__.py000066400000000000000000000005761470056100100214550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ An (approximate) version of SPM's run-level model for fMRI data Consists of an OLS pass through the data, followed by a pooled estimate of a covariance matrix constructed from a series expansion of an AR1 model, expanded in terms of rho. """ from . import model nipy-0.6.1/nipy/modalities/fmri/spm/correlation.py000066400000000000000000000030511470056100100222260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.linalg import inv def ARcovariance(rho, n, cor=False, sigma=1.): """ Return covariance matrix of a sample of length n from an AR(p) process with parameters rho. INPUTS: rho -- an array of length p sigma -- standard deviation of the white noise """ rho = np.asarray(rho) p = rho.shape[0] invK = np.identity(n) for i in range(p): invK -= np.diag((rho[i] / sigma) * np.ones(n-i-1), k=-i-1) K = inv(invK) Q = np.dot(K, K.T) if cor: sd = np.sqrt(np.diag(Q)) sdout = np.multiply.outer(sd, sd) Q /= sd return Q def ARcomponents(rho, n, drho=0.05, cor=False, sigma=1): """ Numerically differentiate covariance matrices of AR(p) of length n with respect to AR parameters around the value rho. If drho is a vector, they are treated as steps in the numerical differentiation. """ rho = np.asarray(rho) drho = np.asarray(drho) p = rho.shape[0] value = [] if drho.shape == (): drho = np.ones(p, np.float64) * drho drho = np.diag(drho) Q = ARcovariance(rho, n, cor=cor, sigma=sigma) value = [Q] for i in range(p): value.append((ARcovariance(rho + drho[i], n, cor=cor) - Q) / drho[i,i]) return np.asarray(value) if __name__ == "__main__": #print np.diag(ARcovariance([0.3], 100, cor=True), k=0) print(len(ARcomponents([0.321],8, drho=0.02))) nipy-0.6.1/nipy/modalities/fmri/spm/model.py000066400000000000000000000074351470056100100210170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import numpy.linalg as L from scipy.stats import f as FDbn from nipy.algorithms.statistics.models.regression import GLSModel, OLSModel from nipy.core.api import Image from nipy.modalities.fmri.fmristat import model as fmristat from nipy.modalities.fmri.fmristat.model import OLS from . import correlation, reml def Fmask(Fimg, dfnum, dfdenom, pvalue=1.0e-04): """ Create mask for use in estimating pooled covariance based on an F contrast. 
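Notes
-----
(Added caveat, hedged): ``FDbn.ppf(pvalue, dfnum, dfdenom)`` with a small
`pvalue` is a *lower* tail quantile, giving a threshold close to zero; if
the intent is to keep voxels significant at level `pvalue`, the upper
tail point ``FDbn.isf(pvalue, dfnum, dfdenom)`` may be what is wanted.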
""" ## TODO check nipy.algorithms.statistics.models.contrast to see if rank is ## correctly set -- I don't think it is right now. print(dfnum, dfdenom) thresh = FDbn.ppf(pvalue, dfnum, dfdenom) return Image(np.greater(np.asarray(Fimg), thresh), Fimg.grid.copy()) def estimate_pooled_covariance(resid, ARtarget=[0.3], mask=None): """ Use SPM's REML implementation to estimate a pooled covariance matrix. Thresholds an F statistic at a marginal pvalue to estimate covariance matrix. """ resid n = resid[:].shape[0] components = correlation.ARcomponents(ARtarget, n) raw_sigma = 0 nvox = 0 for i in range(resid.shape[1]): d = np.asarray(resid[:,i]) d.shape = (d.shape[0], np.prod(d.shape[1:])) keep = np.asarray(mask[i]) keep.shape = np.prod(keep.shape) d = d.compress(keep, axis=1) raw_sigma += np.dot(d, d.T) nvox += d.shape[1] raw_sigma /= nvox C, h, _ = reml.reml(raw_sigma, components, n=nvox) return C class SecondStage: """ Parameters ---------- fmri_image : `FmriImageList` object returning 4D array from ``np.asarray``, having attribute ``volume_start_times`` (if `volume_start_times` is None), and such that ``object[0]`` returns something with attributes ``shape`` formula : :class:`nipy.algorithms.statistics.formula.Formula` sigma : outputs : volume_start_times : """ def __init__(self, fmri_image, formula, sigma, outputs=[], volume_start_times=None): self.fmri_image = fmri_image self.data = np.asarray(fmri_image) self.formula = formula self.outputs = outputs self.sigma = sigma if volume_start_times is None: self.volume_start_times = self.fmri_image.volume_start_times else: self.volume_start_times = volume_start_times def execute(self): def model_params(*args): return (self.sigma,) m = fmristat.model_generator(self.formula, self.data, self.volume_start_times, model_type=GLSModel, model_params=model_params) r = fmristat.results_generator(m) def reshape(i, x): """ To write output, arrays have to be reshaped -- this function does the appropriate reshaping for the two passes of fMRIstat. These passes are i) 'slices through the z-axis' ii) 'parcels of approximately constant AR1 coefficient' """ if len(x.shape) == 2: if type(i) is int: x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:] if type(i) not in [type([]), type(())]: i = (i,) else: i = tuple(i) i = (slice(None,None,None),) + tuple(i) else: if type(i) is int: x.shape = self.fmri_image[0].shape[1:] return i, x o = fmristat.generate_output(self.outputs, r, reshape=reshape) nipy-0.6.1/nipy/modalities/fmri/spm/reml.py000066400000000000000000000106001470056100100206420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import numpy.linalg as npl def orth(X, tol=1.0e-07): """ Compute orthonormal basis for the column span of X. Rank is determined by zeroing all singular values, u, less than or equal to tol*u.max(). INPUTS: X -- n-by-p matrix OUTPUTS: B -- n-by-rank(X) matrix with orthonormal columns spanning the column rank of X """ B, u, _ = npl.svd(X, full_matrices=False) nkeep = np.greater(u, tol*u.max()).astype(np.int_).sum() return B[:,:nkeep] def reml(sigma, components, design=None, n=1, niter=128, penalty_cov=np.exp(-32), penalty_mean=0): """ Adapted from spm_reml.m ReML estimation of covariance components from sigma using design matrix. 
INPUTS: sigma -- m-by-m covariance matrix components -- q-by-m-by-m array of variance components mean of sigma is modeled as a sum over components[i] design -- m-by-p design matrix whose effect is to be removed for ReML. If None, no effect removed (???) n -- degrees of freedom of sigma penalty_cov -- quadratic penalty to be applied in Fisher algorithm. If the value is a float, f, the penalty is f * identity(m). If the value is a 1d array, this is the diagonal of the penalty. penalty_mean -- mean of quadratic penalty to be applied in Fisher algorithm. If the value is a float, f, the location is f * np.ones(q). OUTPUTS: C -- estimated mean of sigma h -- array of length q representing coefficients of variance components cov_h -- estimated covariance matrix of h """ # initialise coefficient, gradient, Hessian Q = components PQ = np.zeros(Q.shape) q = Q.shape[0] m = Q.shape[1] # coefficient h = np.array([np.diag(Q[i]).mean() for i in range(q)]) ## SPM initialization ## h = np.array([np.any(np.diag(Q[i])) for i in range(q)]).astype(np.float64) C = np.sum([h[i] * Q[i] for i in range(Q.shape[0])], axis=0) # gradient in Fisher algorithm dFdh = np.zeros(q) # Hessian in Fisher algorithm dFdhh = np.zeros((q,q)) # penalty terms penalty_cov = np.asarray(penalty_cov) if penalty_cov.shape == (): penalty_cov = penalty_cov * np.identity(q) elif penalty_cov.shape == (q,): penalty_cov = np.diag(penalty_cov) penalty_mean = np.asarray(penalty_mean) if penalty_mean.shape == (): penalty_mean = np.ones(q) * penalty_mean # compute orthonormal basis of design space if design is not None: X = orth(design) else: X = None _iter = 0 _F = np.inf while True: # Current estimate of mean parameter iC = npl.inv(C + np.identity(m) / np.exp(32)) # E-step: conditional covariance if X is not None: iCX = np.dot(iC, X) Cq = npl.inv(np.dot(X.T, iCX)) P = iC - np.dot(iCX, np.dot(Cq, iCX.T)) else: P = iC # M-step: ReML estimate of hyperparameters # Gradient dF/dh (first derivatives) # Expected curvature (second derivatives) U = np.identity(m) - np.dot(P, sigma) / n for i in range(q): PQ[i] = np.dot(P, Q[i]) dFdh[i] = -(PQ[i] * U).sum() * n / 2 for j in range(i+1): dFdhh[i,j] = -(PQ[i]*PQ[j]).sum() * n / 2 dFdhh[j,i] = dFdhh[i,j] # Enforce penalties: dFdh = dFdh - np.dot(penalty_cov, h - penalty_mean) dFdhh = dFdhh - penalty_cov dh = npl.solve(dFdhh, dFdh) h -= dh C = np.sum([h[i] * Q[i] for i in range(Q.shape[0])], axis=0) df = (dFdh * dh).sum() if np.fabs(df) < 1.0e-01: break _iter += 1 if _iter >= niter: break return C, h, -dFdhh if __name__ == "__main__": import numpy.random as R X = R.standard_normal((500,3)) Q = np.array([np.identity(3), np.array([[0,1,0],[1,0,0],[0,0,1]]), np.array([[1,0,0],[0,1,1],[0,1,1]])], np.float64) print(reml(np.dot(X.T,X), Q)) nipy-0.6.1/nipy/modalities/fmri/spm/trace.py000066400000000000000000000040211470056100100210010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.linalg import svd from .reml import orth def _trace(x): """ Trace of a square 2d array. Does not check shape of x to ensure it's square. """ return np.diag(x).sum() def _frobenius(A, B): """ Frobenius inner product of A and B: Trace(A'B) Does not check shapes of A and B to ensure they conform. """ return (A * B).sum() def trRV(X=None, V=None): """ If V is None it defaults to identity. If X is None, it defaults to the 0-dimensional subspace, i.e. R is the identity.
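(Added summary, hedged): R is the residual-forming projector onto the
orthogonal complement of the column span of X, and the function returns
``(trace(RV), trace(RVRV))``, computed from an orthonormal basis of X
rather than by forming R explicitly.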

if __name__ == "__main__":
    from numpy.random import standard_normal

    X = standard_normal((100, 4))
    print(trRV(X))  # should be (96,96)

    V = np.identity(100)
    print(trRV(X, V))  # should be (96,96)

    X[:,3] = X[:,1] + X[:,2]
    print(trRV(X, V))  # should be (97,97)

    u = orth(X)
    V = np.dot(u, u.T)
    print(trRV(X, V))  # should be (0,0)
nipy-0.6.1/nipy/modalities/fmri/tests/000077500000000000000000000000001470056100100176775ustar00rootroot00000000000000
nipy-0.6.1/nipy/modalities/fmri/tests/__init__.py000066400000000000000000000000001470056100100217760ustar00rootroot00000000000000
nipy-0.6.1/nipy/modalities/fmri/tests/cond_test1.txt000066400000000000000000000000571470056100100225050ustar00rootroot00000000000000
10 5.0 1
20 4.0 2
24 3.0 0.1
nipy-0.6.1/nipy/modalities/fmri/tests/dct_10.txt000066400000000000000000000031121470056100100215070ustar00rootroot00000000000000
 3.1622777e-01  4.4170765e-01  4.2532540e-01  3.9847023e-01  3.6180340e-01  3.1622777e-01  2.6286556e-01  2.0303072e-01  1.3819660e-01  6.9959620e-02
 3.1622777e-01  3.9847023e-01  2.6286556e-01  6.9959620e-02 -1.3819660e-01 -3.1622777e-01 -4.2532540e-01 -4.4170765e-01 -3.6180340e-01 -2.0303072e-01
 3.1622777e-01  3.1622777e-01  2.7383935e-17 -3.1622777e-01 -4.4721360e-01 -3.1622777e-01 -8.2151805e-17  3.1622777e-01  4.4721360e-01  3.1622777e-01
 3.1622777e-01  2.0303072e-01 -2.6286556e-01 -4.4170765e-01 -1.3819660e-01  3.1622777e-01  4.2532540e-01  6.9959620e-02 -3.6180340e-01 -3.9847023e-01
 3.1622777e-01  6.9959620e-02 -4.2532540e-01 -2.0303072e-01  3.6180340e-01  3.1622777e-01 -2.6286556e-01 -3.9847023e-01  1.3819660e-01  4.4170765e-01
 3.1622777e-01 -6.9959620e-02 -4.2532540e-01  2.0303072e-01  3.6180340e-01 -3.1622777e-01 -2.6286556e-01  3.9847023e-01  1.3819660e-01 -4.4170765e-01
 3.1622777e-01 -2.0303072e-01 -2.6286556e-01  4.4170765e-01 -1.3819660e-01 -3.1622777e-01  4.2532540e-01 -6.9959620e-02 -3.6180340e-01  3.9847023e-01
 3.1622777e-01 -3.1622777e-01 -8.2151805e-17  3.1622777e-01 -4.4721360e-01  3.1622777e-01  1.0408663e-15 -3.1622777e-01  4.4721360e-01 -3.1622777e-01
 3.1622777e-01 -3.9847023e-01  2.6286556e-01 -6.9959620e-02 -1.3819660e-01  3.1622777e-01 -4.2532540e-01  4.4170765e-01 -3.6180340e-01  2.0303072e-01
 3.1622777e-01 -4.4170765e-01  4.2532540e-01 -3.9847023e-01  3.6180340e-01 -3.1622777e-01  2.6286556e-01 -2.0303072e-01  1.3819660e-01 -6.9959620e-02
nipy-0.6.1/nipy/modalities/fmri/tests/dct_100.txt000066400000000000000000004705441470056100100216060ustar00rootroot00000000000000
 1.0000000e-01  1.4140391e-01  1.4135157e-01  1.4126436e-01  1.4114229e-01  1.4098540e-01  1.4079372e-01  1.4056731e-01  1.4030621e-01  1.4001049e-01  1.3968022e-01  1.3931550e-01  1.3891640e-01  1.3848302e-01  1.3801547e-01  1.3751387e-01  1.3697834e-01  1.3640902e-01  1.3580604e-01  1.3516954e-01  1.3449970e-01  1.3379667e-01  1.3306063e-01  1.3229176e-01
1.3149025e-01 1.3065630e-01 1.2979010e-01 1.2889189e-01 1.2796187e-01 1.2700028e-01 1.2600735e-01 1.2498333e-01 1.2392848e-01 1.2284305e-01 1.2172730e-01 1.2058153e-01 1.1940600e-01 1.1820101e-01 1.1696686e-01 1.1570384e-01 1.1441228e-01 1.1309249e-01 1.1174479e-01 1.1036953e-01 1.0896703e-01 1.0753764e-01 1.0608172e-01 1.0459963e-01 1.0309173e-01 1.0155839e-01 1.0000000e-01 9.8416932e-02 9.6809580e-02 9.5178342e-02 9.3523621e-02 9.1845824e-02 9.0145365e-02 8.8422664e-02 8.6678147e-02 8.4912243e-02 8.3125388e-02 8.1318023e-02 7.9490594e-02 7.7643552e-02 7.5777352e-02 7.3892456e-02 7.1989327e-02 7.0068437e-02 6.8130258e-02 6.6175269e-02 6.4203952e-02 6.2216794e-02 6.0214285e-02 5.8196919e-02 5.6165193e-02 5.4119610e-02 5.2060674e-02 4.9988892e-02 4.7904776e-02 4.5808841e-02 4.3701602e-02 4.1583582e-02 3.9455301e-02 3.7317285e-02 3.5170061e-02 3.3014160e-02 3.0850113e-02 2.8678454e-02 2.6499720e-02 2.4314447e-02 2.2123174e-02 1.9926443e-02 1.7724796e-02 1.5518775e-02 1.3308925e-02 1.1095792e-02 8.8799204e-03 6.6618581e-03 4.4421521e-03 2.2213501e-03 1.0000000e-01 1.4126436e-01 1.4079372e-01 1.4001049e-01 1.3891640e-01 1.3751387e-01 1.3580604e-01 1.3379667e-01 1.3149025e-01 1.2889189e-01 1.2600735e-01 1.2284305e-01 1.1940600e-01 1.1570384e-01 1.1174479e-01 1.0753764e-01 1.0309173e-01 9.8416932e-02 9.3523621e-02 8.8422664e-02 8.3125388e-02 7.7643552e-02 7.1989327e-02 6.6175269e-02 6.0214285e-02 5.4119610e-02 4.7904776e-02 4.1583582e-02 3.5170061e-02 2.8678454e-02 2.2123174e-02 1.5518775e-02 8.8799204e-03 2.2213501e-03 -4.4421521e-03 -1.1095792e-02 -1.7724796e-02 -2.4314447e-02 -3.0850113e-02 -3.7317285e-02 -4.3701602e-02 -4.9988892e-02 -5.6165193e-02 -6.2216794e-02 -6.8130258e-02 -7.3892456e-02 -7.9490594e-02 -8.4912243e-02 -9.0145365e-02 -9.5178342e-02 -1.0000000e-01 -1.0459963e-01 -1.0896703e-01 -1.1309249e-01 -1.1696686e-01 -1.2058153e-01 -1.2392848e-01 -1.2700028e-01 -1.2979010e-01 -1.3229176e-01 -1.3449970e-01 -1.3640902e-01 -1.3801547e-01 -1.3931550e-01 -1.4030621e-01 -1.4098540e-01 -1.4135157e-01 -1.4140391e-01 -1.4114229e-01 -1.4056731e-01 -1.3968022e-01 -1.3848302e-01 -1.3697834e-01 -1.3516954e-01 -1.3306063e-01 -1.3065630e-01 -1.2796187e-01 -1.2498333e-01 -1.2172730e-01 -1.1820101e-01 -1.1441228e-01 -1.1036953e-01 -1.0608172e-01 -1.0155839e-01 -9.6809580e-02 -9.1845824e-02 -8.6678147e-02 -8.1318023e-02 -7.5777352e-02 -7.0068437e-02 -6.4203952e-02 -5.8196919e-02 -5.2060674e-02 -4.5808841e-02 -3.9455301e-02 -3.3014160e-02 -2.6499720e-02 -1.9926443e-02 -1.3308925e-02 -6.6618581e-03 1.0000000e-01 1.4098540e-01 1.3968022e-01 1.3751387e-01 1.3449970e-01 1.3065630e-01 1.2600735e-01 1.2058153e-01 1.1441228e-01 1.0753764e-01 1.0000000e-01 9.1845824e-02 8.3125388e-02 7.3892456e-02 6.4203952e-02 5.4119610e-02 4.3701602e-02 3.3014160e-02 2.2123174e-02 1.1095792e-02 8.6595606e-18 -1.1095792e-02 -2.2123174e-02 -3.3014160e-02 -4.3701602e-02 -5.4119610e-02 -6.4203952e-02 -7.3892456e-02 -8.3125388e-02 -9.1845824e-02 -1.0000000e-01 -1.0753764e-01 -1.1441228e-01 -1.2058153e-01 -1.2600735e-01 -1.3065630e-01 -1.3449970e-01 -1.3751387e-01 -1.3968022e-01 -1.4098540e-01 -1.4142136e-01 -1.4098540e-01 -1.3968022e-01 -1.3751387e-01 -1.3449970e-01 -1.3065630e-01 -1.2600735e-01 -1.2058153e-01 -1.1441228e-01 -1.0753764e-01 -1.0000000e-01 -9.1845824e-02 -8.3125388e-02 -7.3892456e-02 -6.4203952e-02 -5.4119610e-02 -4.3701602e-02 -3.3014160e-02 -2.2123174e-02 -1.1095792e-02 -2.5978682e-17 1.1095792e-02 2.2123174e-02 3.3014160e-02 4.3701602e-02 5.4119610e-02 6.4203952e-02 7.3892456e-02 8.3125388e-02 9.1845824e-02 
1.0000000e-01 1.0753764e-01 1.1441228e-01 1.2058153e-01 1.2600735e-01 1.3065630e-01 1.3449970e-01 1.3751387e-01 1.3968022e-01 1.4098540e-01 1.4142136e-01 1.4098540e-01 1.3968022e-01 1.3751387e-01 1.3449970e-01 1.3065630e-01 1.2600735e-01 1.2058153e-01 1.1441228e-01 1.0753764e-01 1.0000000e-01 9.1845824e-02 8.3125388e-02 7.3892456e-02 6.4203952e-02 5.4119610e-02 4.3701602e-02 3.3014160e-02 2.2123174e-02 1.1095792e-02 1.0000000e-01 1.4056731e-01 1.3801547e-01 1.3379667e-01 1.2796187e-01 1.2058153e-01 1.1174479e-01 1.0155839e-01 9.0145365e-02 7.7643552e-02 6.4203952e-02 4.9988892e-02 3.5170061e-02 1.9926443e-02 4.4421521e-03 -1.1095792e-02 -2.6499720e-02 -4.1583582e-02 -5.6165193e-02 -7.0068437e-02 -8.3125388e-02 -9.5178342e-02 -1.0608172e-01 -1.1570384e-01 -1.2392848e-01 -1.3065630e-01 -1.3580604e-01 -1.3931550e-01 -1.4114229e-01 -1.4126436e-01 -1.3968022e-01 -1.3640902e-01 -1.3149025e-01 -1.2498333e-01 -1.1696686e-01 -1.0753764e-01 -9.6809580e-02 -8.4912243e-02 -7.1989327e-02 -5.8196919e-02 -4.3701602e-02 -2.8678454e-02 -1.3308925e-02 2.2213501e-03 1.7724796e-02 3.3014160e-02 4.7904776e-02 6.2216794e-02 7.5777352e-02 8.8422664e-02 1.0000000e-01 1.1036953e-01 1.1940600e-01 1.2700028e-01 1.3306063e-01 1.3751387e-01 1.4030621e-01 1.4140391e-01 1.4079372e-01 1.3848302e-01 1.3449970e-01 1.2889189e-01 1.2172730e-01 1.1309249e-01 1.0309173e-01 9.1845824e-02 7.9490594e-02 6.6175269e-02 5.2060674e-02 3.7317285e-02 2.2123174e-02 6.6618581e-03 -8.8799204e-03 -2.4314447e-02 -3.9455301e-02 -5.4119610e-02 -6.8130258e-02 -8.1318023e-02 -9.3523621e-02 -1.0459963e-01 -1.1441228e-01 -1.2284305e-01 -1.2979010e-01 -1.3516954e-01 -1.3891640e-01 -1.4098540e-01 -1.4135157e-01 -1.4001049e-01 -1.3697834e-01 -1.3229176e-01 -1.2600735e-01 -1.1820101e-01 -1.0896703e-01 -9.8416932e-02 -8.6678147e-02 -7.3892456e-02 -6.0214285e-02 -4.5808841e-02 -3.0850113e-02 -1.5518775e-02 1.0000000e-01 1.4001049e-01 1.3580604e-01 1.2889189e-01 1.1940600e-01 1.0753764e-01 9.3523621e-02 7.7643552e-02 6.0214285e-02 4.1583582e-02 2.2123174e-02 2.2213501e-03 -1.7724796e-02 -3.7317285e-02 -5.6165193e-02 -7.3892456e-02 -9.0145365e-02 -1.0459963e-01 -1.1696686e-01 -1.2700028e-01 -1.3449970e-01 -1.3931550e-01 -1.4135157e-01 -1.4056731e-01 -1.3697834e-01 -1.3065630e-01 -1.2172730e-01 -1.1036953e-01 -9.6809580e-02 -8.1318023e-02 -6.4203952e-02 -4.5808841e-02 -2.6499720e-02 -6.6618581e-03 1.3308925e-02 3.3014160e-02 5.2060674e-02 7.0068437e-02 8.6678147e-02 1.0155839e-01 1.1441228e-01 1.2498333e-01 1.3306063e-01 1.3848302e-01 1.4114229e-01 1.4098540e-01 1.3801547e-01 1.3229176e-01 1.2392848e-01 1.1309249e-01 1.0000000e-01 8.4912243e-02 6.8130258e-02 4.9988892e-02 3.0850113e-02 1.1095792e-02 -8.8799204e-03 -2.8678454e-02 -4.7904776e-02 -6.6175269e-02 -8.3125388e-02 -9.8416932e-02 -1.1174479e-01 -1.2284305e-01 -1.3149025e-01 -1.3751387e-01 -1.4079372e-01 -1.4126436e-01 -1.3891640e-01 -1.3379667e-01 -1.2600735e-01 -1.1570384e-01 -1.0309173e-01 -8.8422664e-02 -7.1989327e-02 -5.4119610e-02 -3.5170061e-02 -1.5518775e-02 4.4421521e-03 2.4314447e-02 4.3701602e-02 6.2216794e-02 7.9490594e-02 9.5178342e-02 1.0896703e-01 1.2058153e-01 1.2979010e-01 1.3640902e-01 1.4030621e-01 1.4140391e-01 1.3968022e-01 1.3516954e-01 1.2796187e-01 1.1820101e-01 1.0608172e-01 9.1845824e-02 7.5777352e-02 5.8196919e-02 3.9455301e-02 1.9926443e-02 1.0000000e-01 1.3931550e-01 1.3306063e-01 1.2284305e-01 1.0896703e-01 9.1845824e-02 7.1989327e-02 4.9988892e-02 2.6499720e-02 2.2213501e-03 -2.2123174e-02 -4.5808841e-02 -6.8130258e-02 -8.8422664e-02 -1.0608172e-01 -1.2058153e-01 
-1.3149025e-01 -1.3848302e-01 -1.4135157e-01 -1.4001049e-01 -1.3449970e-01 -1.2498333e-01 -1.1174479e-01 -9.5178342e-02 -7.5777352e-02 -5.4119610e-02 -3.0850113e-02 -6.6618581e-03 1.7724796e-02 4.1583582e-02 6.4203952e-02 8.4912243e-02 1.0309173e-01 1.1820101e-01 1.2979010e-01 1.3751387e-01 1.4114229e-01 1.4056731e-01 1.3580604e-01 1.2700028e-01 1.1441228e-01 9.8416932e-02 7.9490594e-02 5.8196919e-02 3.5170061e-02 1.1095792e-02 -1.3308925e-02 -3.7317285e-02 -6.0214285e-02 -8.1318023e-02 -1.0000000e-01 -1.1570384e-01 -1.2796187e-01 -1.3640902e-01 -1.4079372e-01 -1.4098540e-01 -1.3697834e-01 -1.2889189e-01 -1.1696686e-01 -1.0155839e-01 -8.3125388e-02 -6.2216794e-02 -3.9455301e-02 -1.5518775e-02 8.8799204e-03 3.3014160e-02 5.6165193e-02 7.7643552e-02 9.6809580e-02 1.1309249e-01 1.2600735e-01 1.3516954e-01 1.4030621e-01 1.4126436e-01 1.3801547e-01 1.3065630e-01 1.1940600e-01 1.0459963e-01 8.6678147e-02 6.6175269e-02 4.3701602e-02 1.9926443e-02 -4.4421521e-03 -2.8678454e-02 -5.2060674e-02 -7.3892456e-02 -9.3523621e-02 -1.1036953e-01 -1.2392848e-01 -1.3379667e-01 -1.3968022e-01 -1.4140391e-01 -1.3891640e-01 -1.3229176e-01 -1.2172730e-01 -1.0753764e-01 -9.0145365e-02 -7.0068437e-02 -4.7904776e-02 -2.4314447e-02 1.0000000e-01 1.3848302e-01 1.2979010e-01 1.1570384e-01 9.6809580e-02 7.3892456e-02 4.7904776e-02 1.9926443e-02 -8.8799204e-03 -3.7317285e-02 -6.4203952e-02 -8.8422664e-02 -1.0896703e-01 -1.2498333e-01 -1.3580604e-01 -1.4098540e-01 -1.4030621e-01 -1.3379667e-01 -1.2172730e-01 -1.0459963e-01 -8.3125388e-02 -5.8196919e-02 -3.0850113e-02 -2.2213501e-03 2.6499720e-02 5.4119610e-02 7.9490594e-02 1.0155839e-01 1.1940600e-01 1.3229176e-01 1.3968022e-01 1.4126436e-01 1.3697834e-01 1.2700028e-01 1.1174479e-01 9.1845824e-02 6.8130258e-02 4.1583582e-02 1.3308925e-02 -1.5518775e-02 -4.3701602e-02 -7.0068437e-02 -9.3523621e-02 -1.1309249e-01 -1.2796187e-01 -1.3751387e-01 -1.4135157e-01 -1.3931550e-01 -1.3149025e-01 -1.1820101e-01 -1.0000000e-01 -7.7643552e-02 -5.2060674e-02 -2.4314447e-02 4.4421521e-03 3.3014160e-02 6.0214285e-02 8.4912243e-02 1.0608172e-01 1.2284305e-01 1.3449970e-01 1.4056731e-01 1.4079372e-01 1.3516954e-01 1.2392848e-01 1.0753764e-01 8.6678147e-02 6.2216794e-02 3.5170061e-02 6.6618581e-03 -2.2123174e-02 -4.9988892e-02 -7.5777352e-02 -9.8416932e-02 -1.1696686e-01 -1.3065630e-01 -1.3891640e-01 -1.4140391e-01 -1.3801547e-01 -1.2889189e-01 -1.1441228e-01 -9.5178342e-02 -7.1989327e-02 -4.5808841e-02 -1.7724796e-02 1.1095792e-02 3.9455301e-02 6.6175269e-02 9.0145365e-02 1.1036953e-01 1.2600735e-01 1.3640902e-01 1.4114229e-01 1.4001049e-01 1.3306063e-01 1.2058153e-01 1.0309173e-01 8.1318023e-02 5.6165193e-02 2.8678454e-02 1.0000000e-01 1.3751387e-01 1.2600735e-01 1.0753764e-01 8.3125388e-02 5.4119610e-02 2.2123174e-02 -1.1095792e-02 -4.3701602e-02 -7.3892456e-02 -1.0000000e-01 -1.2058153e-01 -1.3449970e-01 -1.4098540e-01 -1.3968022e-01 -1.3065630e-01 -1.1441228e-01 -9.1845824e-02 -6.4203952e-02 -3.3014160e-02 -2.5978682e-17 3.3014160e-02 6.4203952e-02 9.1845824e-02 1.1441228e-01 1.3065630e-01 1.3968022e-01 1.4098540e-01 1.3449970e-01 1.2058153e-01 1.0000000e-01 7.3892456e-02 4.3701602e-02 1.1095792e-02 -2.2123174e-02 -5.4119610e-02 -8.3125388e-02 -1.0753764e-01 -1.2600735e-01 -1.3751387e-01 -1.4142136e-01 -1.3751387e-01 -1.2600735e-01 -1.0753764e-01 -8.3125388e-02 -5.4119610e-02 -2.2123174e-02 1.1095792e-02 4.3701602e-02 7.3892456e-02 1.0000000e-01 1.2058153e-01 1.3449970e-01 1.4098540e-01 1.3968022e-01 1.3065630e-01 1.1441228e-01 9.1845824e-02 6.4203952e-02 3.3014160e-02 7.7936045e-17 
-3.3014160e-02 -6.4203952e-02 -9.1845824e-02 -1.1441228e-01 -1.3065630e-01 -1.3968022e-01 -1.4098540e-01 -1.3449970e-01 -1.2058153e-01 -1.0000000e-01 -7.3892456e-02 -4.3701602e-02 -1.1095792e-02 2.2123174e-02 5.4119610e-02 8.3125388e-02 1.0753764e-01 1.2600735e-01 1.3751387e-01 1.4142136e-01 1.3751387e-01 1.2600735e-01 1.0753764e-01 8.3125388e-02 5.4119610e-02 2.2123174e-02 -1.1095792e-02 -4.3701602e-02 -7.3892456e-02 -1.0000000e-01 -1.2058153e-01 -1.3449970e-01 -1.4098540e-01 -1.3968022e-01 -1.3065630e-01 -1.1441228e-01 -9.1845824e-02 -6.4203952e-02 -3.3014160e-02 1.0000000e-01 1.3640902e-01 1.2172730e-01 9.8416932e-02 6.8130258e-02 3.3014160e-02 -4.4421521e-03 -4.1583582e-02 -7.5777352e-02 -1.0459963e-01 -1.2600735e-01 -1.3848302e-01 -1.4114229e-01 -1.3379667e-01 -1.1696686e-01 -9.1845824e-02 -6.0214285e-02 -2.4314447e-02 1.3308925e-02 4.9988892e-02 8.3125388e-02 1.1036953e-01 1.2979010e-01 1.4001049e-01 1.4030621e-01 1.3065630e-01 1.1174479e-01 8.4912243e-02 5.2060674e-02 1.5518775e-02 -2.2123174e-02 -5.8196919e-02 -9.0145365e-02 -1.1570384e-01 -1.3306063e-01 -1.4098540e-01 -1.3891640e-01 -1.2700028e-01 -1.0608172e-01 -7.7643552e-02 -4.3701602e-02 -6.6618581e-03 3.0850113e-02 6.6175269e-02 9.6809580e-02 1.2058153e-01 1.3580604e-01 1.4140391e-01 1.3697834e-01 1.2284305e-01 1.0000000e-01 7.0068437e-02 3.5170061e-02 -2.2213501e-03 -3.9455301e-02 -7.3892456e-02 -1.0309173e-01 -1.2498333e-01 -1.3801547e-01 -1.4126436e-01 -1.3449970e-01 -1.1820101e-01 -9.3523621e-02 -6.2216794e-02 -2.6499720e-02 1.1095792e-02 4.7904776e-02 8.1318023e-02 1.0896703e-01 1.2889189e-01 1.3968022e-01 1.4056731e-01 1.3149025e-01 1.1309249e-01 8.6678147e-02 5.4119610e-02 1.7724796e-02 -1.9926443e-02 -5.6165193e-02 -8.8422664e-02 -1.1441228e-01 -1.3229176e-01 -1.4079372e-01 -1.3931550e-01 -1.2796187e-01 -1.0753764e-01 -7.9490594e-02 -4.5808841e-02 -8.8799204e-03 2.8678454e-02 6.4203952e-02 9.5178342e-02 1.1940600e-01 1.3516954e-01 1.4135157e-01 1.3751387e-01 1.2392848e-01 1.0155839e-01 7.1989327e-02 3.7317285e-02 1.0000000e-01 1.3516954e-01 1.1696686e-01 8.8422664e-02 5.2060674e-02 1.1095792e-02 -3.0850113e-02 -7.0068437e-02 -1.0309173e-01 -1.2700028e-01 -1.3968022e-01 -1.4001049e-01 -1.2796187e-01 -1.0459963e-01 -7.1989327e-02 -3.3014160e-02 8.8799204e-03 4.9988892e-02 8.6678147e-02 1.1570384e-01 1.3449970e-01 1.4140391e-01 1.3580604e-01 1.1820101e-01 9.0145365e-02 5.4119610e-02 1.3308925e-02 -2.8678454e-02 -6.8130258e-02 -1.0155839e-01 -1.2600735e-01 -1.3931550e-01 -1.4030621e-01 -1.2889189e-01 -1.0608172e-01 -7.3892456e-02 -3.5170061e-02 6.6618581e-03 4.7904776e-02 8.4912243e-02 1.1441228e-01 1.3379667e-01 1.4135157e-01 1.3640902e-01 1.1940600e-01 9.1845824e-02 5.6165193e-02 1.5518775e-02 -2.6499720e-02 -6.6175269e-02 -1.0000000e-01 -1.2498333e-01 -1.3891640e-01 -1.4056731e-01 -1.2979010e-01 -1.0753764e-01 -7.5777352e-02 -3.7317285e-02 4.4421521e-03 4.5808841e-02 8.3125388e-02 1.1309249e-01 1.3306063e-01 1.4126436e-01 1.3697834e-01 1.2058153e-01 9.3523621e-02 5.8196919e-02 1.7724796e-02 -2.4314447e-02 -6.4203952e-02 -9.8416932e-02 -1.2392848e-01 -1.3848302e-01 -1.4079372e-01 -1.3065630e-01 -1.0896703e-01 -7.7643552e-02 -3.9455301e-02 2.2213501e-03 4.3701602e-02 8.1318023e-02 1.1174479e-01 1.3229176e-01 1.4114229e-01 1.3751387e-01 1.2172730e-01 9.5178342e-02 6.0214285e-02 1.9926443e-02 -2.2123174e-02 -6.2216794e-02 -9.6809580e-02 -1.2284305e-01 -1.3801547e-01 -1.4098540e-01 -1.3149025e-01 -1.1036953e-01 -7.9490594e-02 -4.1583582e-02 1.0000000e-01 1.3379667e-01 1.1174479e-01 7.7643552e-02 3.5170061e-02 -1.1095792e-02 
-5.6165193e-02 -9.5178342e-02 -1.2392848e-01 -1.3931550e-01 -1.3968022e-01 -1.2498333e-01 -9.6809580e-02 -5.8196919e-02 -1.3308925e-02 3.3014160e-02 7.5777352e-02 1.1036953e-01 1.3306063e-01 1.4140391e-01 1.3449970e-01 1.1309249e-01 7.9490594e-02 3.7317285e-02 -8.8799204e-03 -5.4119610e-02 -9.3523621e-02 -1.2284305e-01 -1.3891640e-01 -1.4001049e-01 -1.2600735e-01 -9.8416932e-02 -6.0214285e-02 -1.5518775e-02 3.0850113e-02 7.3892456e-02 1.0896703e-01 1.3229176e-01 1.4135157e-01 1.3516954e-01 1.1441228e-01 8.1318023e-02 3.9455301e-02 -6.6618581e-03 -5.2060674e-02 -9.1845824e-02 -1.2172730e-01 -1.3848302e-01 -1.4030621e-01 -1.2700028e-01 -1.0000000e-01 -6.2216794e-02 -1.7724796e-02 2.8678454e-02 7.1989327e-02 1.0753764e-01 1.3149025e-01 1.4126436e-01 1.3580604e-01 1.1570384e-01 8.3125388e-02 4.1583582e-02 -4.4421521e-03 -4.9988892e-02 -9.0145365e-02 -1.2058153e-01 -1.3801547e-01 -1.4056731e-01 -1.2796187e-01 -1.0155839e-01 -6.4203952e-02 -1.9926443e-02 2.6499720e-02 7.0068437e-02 1.0608172e-01 1.3065630e-01 1.4114229e-01 1.3640902e-01 1.1696686e-01 8.4912243e-02 4.3701602e-02 -2.2213501e-03 -4.7904776e-02 -8.8422664e-02 -1.1940600e-01 -1.3751387e-01 -1.4079372e-01 -1.2889189e-01 -1.0309173e-01 -6.6175269e-02 -2.2123174e-02 2.4314447e-02 6.8130258e-02 1.0459963e-01 1.2979010e-01 1.4098540e-01 1.3697834e-01 1.1820101e-01 8.6678147e-02 4.5808841e-02 1.0000000e-01 1.3229176e-01 1.0608172e-01 6.6175269e-02 1.7724796e-02 -3.3014160e-02 -7.9490594e-02 -1.1570384e-01 -1.3697834e-01 -1.4056731e-01 -1.2600735e-01 -9.5178342e-02 -5.2060674e-02 -2.2213501e-03 4.7904776e-02 9.1845824e-02 1.2392848e-01 1.4001049e-01 1.3801547e-01 1.1820101e-01 8.3125388e-02 3.7317285e-02 -1.3308925e-02 -6.2216794e-02 -1.0309173e-01 -1.3065630e-01 -1.4135157e-01 -1.3379667e-01 -1.0896703e-01 -7.0068437e-02 -2.2123174e-02 2.8678454e-02 7.5777352e-02 1.1309249e-01 1.3580604e-01 1.4098540e-01 1.2796187e-01 9.8416932e-02 5.6165193e-02 6.6618581e-03 -4.3701602e-02 -8.8422664e-02 -1.2172730e-01 -1.3931550e-01 -1.3891640e-01 -1.2058153e-01 -8.6678147e-02 -4.1583582e-02 8.8799204e-03 5.8196919e-02 1.0000000e-01 1.2889189e-01 1.4114229e-01 1.3516954e-01 1.1174479e-01 7.3892456e-02 2.6499720e-02 -2.4314447e-02 -7.1989327e-02 -1.1036953e-01 -1.3449970e-01 -1.4126436e-01 -1.2979010e-01 -1.0155839e-01 -6.0214285e-02 -1.1095792e-02 3.9455301e-02 8.4912243e-02 1.1940600e-01 1.3848302e-01 1.3968022e-01 1.2284305e-01 9.0145365e-02 4.5808841e-02 -4.4421521e-03 -5.4119610e-02 -9.6809580e-02 -1.2700028e-01 -1.4079372e-01 -1.3640902e-01 -1.1441228e-01 -7.7643552e-02 -3.0850113e-02 1.9926443e-02 6.8130258e-02 1.0753764e-01 1.3306063e-01 1.4140391e-01 1.3149025e-01 1.0459963e-01 6.4203952e-02 1.5518775e-02 -3.5170061e-02 -8.1318023e-02 -1.1696686e-01 -1.3751387e-01 -1.4030621e-01 -1.2498333e-01 -9.3523621e-02 -4.9988892e-02 1.0000000e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 8.6595606e-18 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 -2.5978682e-17 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 -8.2309594e-17 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.9059787e-16 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 7.7936045e-17 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.5595963e-16 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 
5.4119610e-02 -1.3864051e-16 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 -3.8110820e-16 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.0400226e-16 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 8.6683143e-17 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 -6.9364022e-17 -5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -1.4142136e-01 -1.3065630e-01 -1.0000000e-01 -5.4119610e-02 5.5447449e-16 5.4119610e-02 1.0000000e-01 1.3065630e-01 1.4142136e-01 1.3065630e-01 1.0000000e-01 5.4119610e-02 1.0000000e-01 1.2889189e-01 9.3523621e-02 4.1583582e-02 -1.7724796e-02 -7.3892456e-02 -1.1696686e-01 -1.3931550e-01 -1.3697834e-01 -1.1036953e-01 -6.4203952e-02 -6.6618581e-03 5.2060674e-02 1.0155839e-01 1.3306063e-01 1.4098540e-01 1.2392848e-01 8.4912243e-02 3.0850113e-02 -2.8678454e-02 -8.3125388e-02 -1.2284305e-01 -1.4079372e-01 -1.3379667e-01 -1.0309173e-01 -5.4119610e-02 4.4421521e-03 6.2216794e-02 1.0896703e-01 1.3640902e-01 1.3968022e-01 1.1820101e-01 7.5777352e-02 1.9926443e-02 -3.9455301e-02 -9.1845824e-02 -1.2796187e-01 -1.4140391e-01 -1.2979010e-01 -9.5178342e-02 -4.3701602e-02 1.5518775e-02 7.1989327e-02 1.1570384e-01 1.3891640e-01 1.3751387e-01 1.1174479e-01 6.6175269e-02 8.8799204e-03 -4.9988892e-02 -1.0000000e-01 -1.3229176e-01 -1.4114229e-01 -1.2498333e-01 -8.6678147e-02 -3.3014160e-02 2.6499720e-02 8.1318023e-02 1.2172730e-01 1.4056731e-01 1.3449970e-01 1.0459963e-01 5.6165193e-02 -2.2213501e-03 -6.0214285e-02 -1.0753764e-01 -1.3580604e-01 -1.4001049e-01 -1.1940600e-01 -7.7643552e-02 -2.2123174e-02 3.7317285e-02 9.0145365e-02 1.2700028e-01 1.4135157e-01 1.3065630e-01 9.6809580e-02 4.5808841e-02 -1.3308925e-02 -7.0068437e-02 -1.1441228e-01 -1.3848302e-01 -1.3801547e-01 -1.1309249e-01 -6.8130258e-02 -1.1095792e-02 4.7904776e-02 9.8416932e-02 1.3149025e-01 1.4126436e-01 1.2600735e-01 8.8422664e-02 3.5170061e-02 -2.4314447e-02 -7.9490594e-02 -1.2058153e-01 -1.4030621e-01 -1.3516954e-01 -1.0608172e-01 -5.8196919e-02 1.0000000e-01 1.2700028e-01 8.6678147e-02 2.8678454e-02 -3.5170061e-02 -9.1845824e-02 -1.2979010e-01 -1.4126436e-01 -1.2392848e-01 -8.1318023e-02 -2.2123174e-02 4.1583582e-02 9.6809580e-02 1.3229176e-01 1.4079372e-01 1.2058153e-01 7.5777352e-02 1.5518775e-02 -4.7904776e-02 -1.0155839e-01 -1.3449970e-01 -1.4001049e-01 -1.1696686e-01 -7.0068437e-02 -8.8799204e-03 5.4119610e-02 1.0608172e-01 1.3640902e-01 1.3891640e-01 1.1309249e-01 6.4203952e-02 2.2213501e-03 -6.0214285e-02 -1.1036953e-01 -1.3801547e-01 -1.3751387e-01 -1.0896703e-01 -5.8196919e-02 4.4421521e-03 6.6175269e-02 1.1441228e-01 1.3931550e-01 1.3580604e-01 1.0459963e-01 5.2060674e-02 -1.1095792e-02 -7.1989327e-02 -1.1820101e-01 -1.4030621e-01 -1.3379667e-01 -1.0000000e-01 -4.5808841e-02 1.7724796e-02 7.7643552e-02 1.2172730e-01 1.4098540e-01 1.3149025e-01 9.5178342e-02 3.9455301e-02 -2.4314447e-02 -8.3125388e-02 -1.2498333e-01 -1.4135157e-01 -1.2889189e-01 -9.0145365e-02 -3.3014160e-02 3.0850113e-02 8.8422664e-02 1.2796187e-01 1.4140391e-01 1.2600735e-01 8.4912243e-02 2.6499720e-02 -3.7317285e-02 -9.3523621e-02 -1.3065630e-01 -1.4114229e-01 -1.2284305e-01 -7.9490594e-02 -1.9926443e-02 4.3701602e-02 9.8416932e-02 1.3306063e-01 1.4056731e-01 1.1940600e-01 7.3892456e-02 1.3308925e-02 -4.9988892e-02 -1.0309173e-01 -1.3516954e-01 -1.3968022e-01 -1.1570384e-01 -6.8130258e-02 -6.6618581e-03 5.6165193e-02 1.0753764e-01 
1.3697834e-01 1.3848302e-01 1.1174479e-01 6.2216794e-02 1.0000000e-01 1.2498333e-01 7.9490594e-02 1.5518775e-02 -5.2060674e-02 -1.0753764e-01 -1.3801547e-01 -1.3640902e-01 -1.0309173e-01 -4.5808841e-02 2.2123174e-02 8.4912243e-02 1.2796187e-01 1.4126436e-01 1.2172730e-01 7.3892456e-02 8.8799204e-03 -5.8196919e-02 -1.1174479e-01 -1.3931550e-01 -1.3449970e-01 -9.8416932e-02 -3.9455301e-02 2.8678454e-02 9.0145365e-02 1.3065630e-01 1.4079372e-01 1.1820101e-01 6.8130258e-02 2.2213501e-03 -6.4203952e-02 -1.1570384e-01 -1.4030621e-01 -1.3229176e-01 -9.3523621e-02 -3.3014160e-02 3.5170061e-02 9.5178342e-02 1.3306063e-01 1.4001049e-01 1.1441228e-01 6.2216794e-02 -4.4421521e-03 -7.0068437e-02 -1.1940600e-01 -1.4098540e-01 -1.2979010e-01 -8.8422664e-02 -2.6499720e-02 4.1583582e-02 1.0000000e-01 1.3516954e-01 1.3891640e-01 1.1036953e-01 5.6165193e-02 -1.1095792e-02 -7.5777352e-02 -1.2284305e-01 -1.4135157e-01 -1.2700028e-01 -8.3125388e-02 -1.9926443e-02 4.7904776e-02 1.0459963e-01 1.3697834e-01 1.3751387e-01 1.0608172e-01 4.9988892e-02 -1.7724796e-02 -8.1318023e-02 -1.2600735e-01 -1.4140391e-01 -1.2392848e-01 -7.7643552e-02 -1.3308925e-02 5.4119610e-02 1.0896703e-01 1.3848302e-01 1.3580604e-01 1.0155839e-01 4.3701602e-02 -2.4314447e-02 -8.6678147e-02 -1.2889189e-01 -1.4114229e-01 -1.2058153e-01 -7.1989327e-02 -6.6618581e-03 6.0214285e-02 1.1309249e-01 1.3968022e-01 1.3379667e-01 9.6809580e-02 3.7317285e-02 -3.0850113e-02 -9.1845824e-02 -1.3149025e-01 -1.4056731e-01 -1.1696686e-01 -6.6175269e-02 1.0000000e-01 1.2284305e-01 7.1989327e-02 2.2213501e-03 -6.8130258e-02 -1.2058153e-01 -1.4135157e-01 -1.2498333e-01 -7.5777352e-02 -6.6618581e-03 6.4203952e-02 1.1820101e-01 1.4114229e-01 1.2700028e-01 7.9490594e-02 1.1095792e-02 -6.0214285e-02 -1.1570384e-01 -1.4079372e-01 -1.2889189e-01 -8.3125388e-02 -1.5518775e-02 5.6165193e-02 1.1309249e-01 1.4030621e-01 1.3065630e-01 8.6678147e-02 1.9926443e-02 -5.2060674e-02 -1.1036953e-01 -1.3968022e-01 -1.3229176e-01 -9.0145365e-02 -2.4314447e-02 4.7904776e-02 1.0753764e-01 1.3891640e-01 1.3379667e-01 9.3523621e-02 2.8678454e-02 -4.3701602e-02 -1.0459963e-01 -1.3801547e-01 -1.3516954e-01 -9.6809580e-02 -3.3014160e-02 3.9455301e-02 1.0155839e-01 1.3697834e-01 1.3640902e-01 1.0000000e-01 3.7317285e-02 -3.5170061e-02 -9.8416932e-02 -1.3580604e-01 -1.3751387e-01 -1.0309173e-01 -4.1583582e-02 3.0850113e-02 9.5178342e-02 1.3449970e-01 1.3848302e-01 1.0608172e-01 4.5808841e-02 -2.6499720e-02 -9.1845824e-02 -1.3306063e-01 -1.3931550e-01 -1.0896703e-01 -4.9988892e-02 2.2123174e-02 8.8422664e-02 1.3149025e-01 1.4001049e-01 1.1174479e-01 5.4119610e-02 -1.7724796e-02 -8.4912243e-02 -1.2979010e-01 -1.4056731e-01 -1.1441228e-01 -5.8196919e-02 1.3308925e-02 8.1318023e-02 1.2796187e-01 1.4098540e-01 1.1696686e-01 6.2216794e-02 -8.8799204e-03 -7.7643552e-02 -1.2600735e-01 -1.4126436e-01 -1.1940600e-01 -6.6175269e-02 4.4421521e-03 7.3892456e-02 1.2392848e-01 1.4140391e-01 1.2172730e-01 7.0068437e-02 1.0000000e-01 1.2058153e-01 6.4203952e-02 -1.1095792e-02 -8.3125388e-02 -1.3065630e-01 -1.3968022e-01 -1.0753764e-01 -4.3701602e-02 3.3014160e-02 1.0000000e-01 1.3751387e-01 1.3449970e-01 9.1845824e-02 2.2123174e-02 -5.4119610e-02 -1.1441228e-01 -1.4098540e-01 -1.2600735e-01 -7.3892456e-02 -3.1183172e-16 7.3892456e-02 1.2600735e-01 1.4098540e-01 1.1441228e-01 5.4119610e-02 -2.2123174e-02 -9.1845824e-02 -1.3449970e-01 -1.3751387e-01 -1.0000000e-01 -3.3014160e-02 4.3701602e-02 1.0753764e-01 1.3968022e-01 1.3065630e-01 8.3125388e-02 1.1095792e-02 -6.4203952e-02 -1.2058153e-01 -1.4142136e-01 
-1.2058153e-01 -6.4203952e-02 1.1095792e-02 8.3125388e-02 1.3065630e-01 1.3968022e-01 1.0753764e-01 4.3701602e-02 -3.3014160e-02 -1.0000000e-01 -1.3751387e-01 -1.3449970e-01 -9.1845824e-02 -2.2123174e-02 5.4119610e-02 1.1441228e-01 1.4098540e-01 1.2600735e-01 7.3892456e-02 -6.9364022e-17 -7.3892456e-02 -1.2600735e-01 -1.4098540e-01 -1.1441228e-01 -5.4119610e-02 2.2123174e-02 9.1845824e-02 1.3449970e-01 1.3751387e-01 1.0000000e-01 3.3014160e-02 -4.3701602e-02 -1.0753764e-01 -1.3968022e-01 -1.3065630e-01 -8.3125388e-02 -1.1095792e-02 6.4203952e-02 1.2058153e-01 1.4142136e-01 1.2058153e-01 6.4203952e-02 -1.1095792e-02 -8.3125388e-02 -1.3065630e-01 -1.3968022e-01 -1.0753764e-01 -4.3701602e-02 3.3014160e-02 1.0000000e-01 1.3751387e-01 1.3449970e-01 9.1845824e-02 2.2123174e-02 -5.4119610e-02 -1.1441228e-01 -1.4098540e-01 -1.2600735e-01 -7.3892456e-02 1.0000000e-01 1.1820101e-01 5.6165193e-02 -2.4314447e-02 -9.6809580e-02 -1.3751387e-01 -1.3306063e-01 -8.4912243e-02 -8.8799204e-03 7.0068437e-02 1.2600735e-01 1.4056731e-01 1.0896703e-01 4.1583582e-02 -3.9455301e-02 -1.0753764e-01 -1.4030621e-01 -1.2700028e-01 -7.1989327e-02 6.6618581e-03 8.3125388e-02 1.3229176e-01 1.3801547e-01 9.8416932e-02 2.6499720e-02 -5.4119610e-02 -1.1696686e-01 -1.4140391e-01 -1.1940600e-01 -5.8196919e-02 2.2123174e-02 9.5178342e-02 1.3697834e-01 1.3379667e-01 8.6678147e-02 1.1095792e-02 -6.8130258e-02 -1.2498333e-01 -1.4079372e-01 -1.1036953e-01 -4.3701602e-02 3.7317285e-02 1.0608172e-01 1.4001049e-01 1.2796187e-01 7.3892456e-02 -4.4421521e-03 -8.1318023e-02 -1.3149025e-01 -1.3848302e-01 -1.0000000e-01 -2.8678454e-02 5.2060674e-02 1.1570384e-01 1.4135157e-01 1.2058153e-01 6.0214285e-02 -1.9926443e-02 -9.3523621e-02 -1.3640902e-01 -1.3449970e-01 -8.8422664e-02 -1.3308925e-02 6.6175269e-02 1.2392848e-01 1.4098540e-01 1.1174479e-01 4.5808841e-02 -3.5170061e-02 -1.0459963e-01 -1.3968022e-01 -1.2889189e-01 -7.5777352e-02 2.2213501e-03 7.9490594e-02 1.3065630e-01 1.3891640e-01 1.0155839e-01 3.0850113e-02 -4.9988892e-02 -1.1441228e-01 -1.4126436e-01 -1.2172730e-01 -6.2216794e-02 1.7724796e-02 9.1845824e-02 1.3580604e-01 1.3516954e-01 9.0145365e-02 1.5518775e-02 -6.4203952e-02 -1.2284305e-01 -1.4114229e-01 -1.1309249e-01 -4.7904776e-02 3.3014160e-02 1.0309173e-01 1.3931550e-01 1.2979010e-01 7.7643552e-02 1.0000000e-01 1.1570384e-01 4.7904776e-02 -3.7317285e-02 -1.0896703e-01 -1.4098540e-01 -1.2172730e-01 -5.8196919e-02 2.6499720e-02 1.0155839e-01 1.3968022e-01 1.2700028e-01 6.8130258e-02 -1.5518775e-02 -9.3523621e-02 -1.3751387e-01 -1.3149025e-01 -7.7643552e-02 4.4421521e-03 8.4912243e-02 1.3449970e-01 1.3516954e-01 8.6678147e-02 6.6618581e-03 -7.5777352e-02 -1.3065630e-01 -1.3801547e-01 -9.5178342e-02 -1.7724796e-02 6.6175269e-02 1.2600735e-01 1.4001049e-01 1.0309173e-01 2.8678454e-02 -5.6165193e-02 -1.2058153e-01 -1.4114229e-01 -1.1036953e-01 -3.9455301e-02 4.5808841e-02 1.1441228e-01 1.4140391e-01 1.1696686e-01 4.9988892e-02 -3.5170061e-02 -1.0753764e-01 -1.4079372e-01 -1.2284305e-01 -6.0214285e-02 2.4314447e-02 1.0000000e-01 1.3931550e-01 1.2796187e-01 7.0068437e-02 -1.3308925e-02 -9.1845824e-02 -1.3697834e-01 -1.3229176e-01 -7.9490594e-02 2.2213501e-03 8.3125388e-02 1.3379667e-01 1.3580604e-01 8.8422664e-02 8.8799204e-03 -7.3892456e-02 -1.2979010e-01 -1.3848302e-01 -9.6809580e-02 -1.9926443e-02 6.4203952e-02 1.2498333e-01 1.4030621e-01 1.0459963e-01 3.0850113e-02 -5.4119610e-02 -1.1940600e-01 -1.4126436e-01 -1.1174479e-01 -4.1583582e-02 4.3701602e-02 1.1309249e-01 1.4135157e-01 1.1820101e-01 5.2060674e-02 -3.3014160e-02 
-1.0608172e-01 -1.4056731e-01 -1.2392848e-01 -6.2216794e-02 2.2123174e-02 9.8416932e-02 1.3891640e-01 1.2889189e-01 7.1989327e-02 -1.1095792e-02 -9.0145365e-02 -1.3640902e-01 -1.3306063e-01 -8.1318023e-02 1.0000000e-01 1.1309249e-01 3.9455301e-02 -4.9988892e-02 -1.1940600e-01 -1.4098540e-01 -1.0608172e-01 -2.8678454e-02 6.0214285e-02 1.2498333e-01 1.3968022e-01 9.8416932e-02 1.7724796e-02 -7.0068437e-02 -1.2979010e-01 -1.3751387e-01 -9.0145365e-02 -6.6618581e-03 7.9490594e-02 1.3379667e-01 1.3449970e-01 8.1318023e-02 -4.4421521e-03 -8.8422664e-02 -1.3697834e-01 -1.3065630e-01 -7.1989327e-02 1.5518775e-02 9.6809580e-02 1.3931550e-01 1.2600735e-01 6.2216794e-02 -2.6499720e-02 -1.0459963e-01 -1.4079372e-01 -1.2058153e-01 -5.2060674e-02 3.7317285e-02 1.1174479e-01 1.4140391e-01 1.1441228e-01 4.1583582e-02 -4.7904776e-02 -1.1820101e-01 -1.4114229e-01 -1.0753764e-01 -3.0850113e-02 5.8196919e-02 1.2392848e-01 1.4001049e-01 1.0000000e-01 1.9926443e-02 -6.8130258e-02 -1.2889189e-01 -1.3801547e-01 -9.1845824e-02 -8.8799204e-03 7.7643552e-02 1.3306063e-01 1.3516954e-01 8.3125388e-02 -2.2213501e-03 -8.6678147e-02 -1.3640902e-01 -1.3149025e-01 -7.3892456e-02 1.3308925e-02 9.5178342e-02 1.3891640e-01 1.2700028e-01 6.4203952e-02 -2.4314447e-02 -1.0309173e-01 -1.4056731e-01 -1.2172730e-01 -5.4119610e-02 3.5170061e-02 1.1036953e-01 1.4135157e-01 1.1570384e-01 4.3701602e-02 -4.5808841e-02 -1.1696686e-01 -1.4126436e-01 -1.0896703e-01 -3.3014160e-02 5.6165193e-02 1.2284305e-01 1.4030621e-01 1.0155839e-01 2.2123174e-02 -6.6175269e-02 -1.2796187e-01 -1.3848302e-01 -9.3523621e-02 -1.1095792e-02 7.5777352e-02 1.3229176e-01 1.3580604e-01 8.4912243e-02 1.0000000e-01 1.1036953e-01 3.0850113e-02 -6.2216794e-02 -1.2796187e-01 -1.3751387e-01 -8.6678147e-02 2.2213501e-03 9.0145365e-02 1.3848302e-01 1.2600735e-01 5.8196919e-02 -3.5170061e-02 -1.1309249e-01 -1.4135157e-01 -1.0753764e-01 -2.6499720e-02 6.6175269e-02 1.2979010e-01 1.3640902e-01 8.3125388e-02 -6.6618581e-03 -9.3523621e-02 -1.3931550e-01 -1.2392848e-01 -5.4119610e-02 3.9455301e-02 1.1570384e-01 1.4114229e-01 1.0459963e-01 2.2123174e-02 -7.0068437e-02 -1.3149025e-01 -1.3516954e-01 -7.9490594e-02 1.1095792e-02 9.6809580e-02 1.4001049e-01 1.2172730e-01 4.9988892e-02 -4.3701602e-02 -1.1820101e-01 -1.4079372e-01 -1.0155839e-01 -1.7724796e-02 7.3892456e-02 1.3306063e-01 1.3379667e-01 7.5777352e-02 -1.5518775e-02 -1.0000000e-01 -1.4056731e-01 -1.1940600e-01 -4.5808841e-02 4.7904776e-02 1.2058153e-01 1.4030621e-01 9.8416932e-02 1.3308925e-02 -7.7643552e-02 -1.3449970e-01 -1.3229176e-01 -7.1989327e-02 1.9926443e-02 1.0309173e-01 1.4098540e-01 1.1696686e-01 4.1583582e-02 -5.2060674e-02 -1.2284305e-01 -1.3968022e-01 -9.5178342e-02 -8.8799204e-03 8.1318023e-02 1.3580604e-01 1.3065630e-01 6.8130258e-02 -2.4314447e-02 -1.0608172e-01 -1.4126436e-01 -1.1441228e-01 -3.7317285e-02 5.6165193e-02 1.2498333e-01 1.3891640e-01 9.1845824e-02 4.4421521e-03 -8.4912243e-02 -1.3697834e-01 -1.2889189e-01 -6.4203952e-02 2.8678454e-02 1.0896703e-01 1.4140391e-01 1.1174479e-01 3.3014160e-02 -6.0214285e-02 -1.2700028e-01 -1.3801547e-01 -8.8422664e-02 1.0000000e-01 1.0753764e-01 2.2123174e-02 -7.3892456e-02 -1.3449970e-01 -1.3065630e-01 -6.4203952e-02 3.3014160e-02 1.1441228e-01 1.4098540e-01 1.0000000e-01 1.1095792e-02 -8.3125388e-02 -1.3751387e-01 -1.2600735e-01 -5.4119610e-02 4.3701602e-02 1.2058153e-01 1.3968022e-01 9.1845824e-02 7.7936045e-17 -9.1845824e-02 -1.3968022e-01 -1.2058153e-01 -4.3701602e-02 5.4119610e-02 1.2600735e-01 1.3751387e-01 8.3125388e-02 -1.1095792e-02 -1.0000000e-01 
-1.4098540e-01 -1.1441228e-01 -3.3014160e-02 6.4203952e-02 1.3065630e-01 1.3449970e-01 7.3892456e-02 -2.2123174e-02 -1.0753764e-01 -1.4142136e-01 -1.0753764e-01 -2.2123174e-02 7.3892456e-02 1.3449970e-01 1.3065630e-01 6.4203952e-02 -3.3014160e-02 -1.1441228e-01 -1.4098540e-01 -1.0000000e-01 -1.1095792e-02 8.3125388e-02 1.3751387e-01 1.2600735e-01 5.4119610e-02 -4.3701602e-02 -1.2058153e-01 -1.3968022e-01 -9.1845824e-02 5.1983624e-16 9.1845824e-02 1.3968022e-01 1.2058153e-01 4.3701602e-02 -5.4119610e-02 -1.2600735e-01 -1.3751387e-01 -8.3125388e-02 1.1095792e-02 1.0000000e-01 1.4098540e-01 1.1441228e-01 3.3014160e-02 -6.4203952e-02 -1.3065630e-01 -1.3449970e-01 -7.3892456e-02 2.2123174e-02 1.0753764e-01 1.4142136e-01 1.0753764e-01 2.2123174e-02 -7.3892456e-02 -1.3449970e-01 -1.3065630e-01 -6.4203952e-02 3.3014160e-02 1.1441228e-01 1.4098540e-01 1.0000000e-01 1.1095792e-02 -8.3125388e-02 -1.3751387e-01 -1.2600735e-01 -5.4119610e-02 4.3701602e-02 1.2058153e-01 1.3968022e-01 9.1845824e-02 1.0000000e-01 1.0459963e-01 1.3308925e-02 -8.4912243e-02 -1.3891640e-01 -1.2058153e-01 -3.9455301e-02 6.2216794e-02 1.3149025e-01 1.3229176e-01 6.4203952e-02 -3.7317285e-02 -1.1940600e-01 -1.3931550e-01 -8.6678147e-02 1.1095792e-02 1.0309173e-01 1.4140391e-01 1.0608172e-01 1.5518775e-02 -8.3125388e-02 -1.3848302e-01 -1.2172730e-01 -4.1583582e-02 6.0214285e-02 1.3065630e-01 1.3306063e-01 6.6175269e-02 -3.5170061e-02 -1.1820101e-01 -1.3968022e-01 -8.8422664e-02 8.8799204e-03 1.0155839e-01 1.4135157e-01 1.0753764e-01 1.7724796e-02 -8.1318023e-02 -1.3801547e-01 -1.2284305e-01 -4.3701602e-02 5.8196919e-02 1.2979010e-01 1.3379667e-01 6.8130258e-02 -3.3014160e-02 -1.1696686e-01 -1.4001049e-01 -9.0145365e-02 6.6618581e-03 1.0000000e-01 1.4126436e-01 1.0896703e-01 1.9926443e-02 -7.9490594e-02 -1.3751387e-01 -1.2392848e-01 -4.5808841e-02 5.6165193e-02 1.2889189e-01 1.3449970e-01 7.0068437e-02 -3.0850113e-02 -1.1570384e-01 -1.4030621e-01 -9.1845824e-02 4.4421521e-03 9.8416932e-02 1.4114229e-01 1.1036953e-01 2.2123174e-02 -7.7643552e-02 -1.3697834e-01 -1.2498333e-01 -4.7904776e-02 5.4119610e-02 1.2796187e-01 1.3516954e-01 7.1989327e-02 -2.8678454e-02 -1.1441228e-01 -1.4056731e-01 -9.3523621e-02 2.2213501e-03 9.6809580e-02 1.4098540e-01 1.1174479e-01 2.4314447e-02 -7.5777352e-02 -1.3640902e-01 -1.2600735e-01 -4.9988892e-02 5.2060674e-02 1.2700028e-01 1.3580604e-01 7.3892456e-02 -2.6499720e-02 -1.1309249e-01 -1.4079372e-01 -9.5178342e-02 1.0000000e-01 1.0155839e-01 4.4421521e-03 -9.5178342e-02 -1.4114229e-01 -1.0753764e-01 -1.3308925e-02 8.8422664e-02 1.4030621e-01 1.1309249e-01 2.2123174e-02 -8.1318023e-02 -1.3891640e-01 -1.1820101e-01 -3.0850113e-02 7.3892456e-02 1.3697834e-01 1.2284305e-01 3.9455301e-02 -6.6175269e-02 -1.3449970e-01 -1.2700028e-01 -4.7904776e-02 5.8196919e-02 1.3149025e-01 1.3065630e-01 5.6165193e-02 -4.9988892e-02 -1.2796187e-01 -1.3379667e-01 -6.4203952e-02 4.1583582e-02 1.2392848e-01 1.3640902e-01 7.1989327e-02 -3.3014160e-02 -1.1940600e-01 -1.3848302e-01 -7.9490594e-02 2.4314447e-02 1.1441228e-01 1.4001049e-01 8.6678147e-02 -1.5518775e-02 -1.0896703e-01 -1.4098540e-01 -9.3523621e-02 6.6618581e-03 1.0309173e-01 1.4140391e-01 1.0000000e-01 2.2213501e-03 -9.6809580e-02 -1.4126436e-01 -1.0608172e-01 -1.1095792e-02 9.0145365e-02 1.4056731e-01 1.1174479e-01 1.9926443e-02 -8.3125388e-02 -1.3931550e-01 -1.1696686e-01 -2.8678454e-02 7.5777352e-02 1.3751387e-01 1.2172730e-01 3.7317285e-02 -6.8130258e-02 -1.3516954e-01 -1.2600735e-01 -4.5808841e-02 6.0214285e-02 1.3229176e-01 1.2979010e-01 5.4119610e-02 
-5.2060674e-02 -1.2889189e-01 -1.3306063e-01 -6.2216794e-02 4.3701602e-02 1.2498333e-01 1.3580604e-01 7.0068437e-02 -3.5170061e-02 -1.2058153e-01 -1.3801547e-01 -7.7643552e-02 2.6499720e-02 1.1570384e-01 1.3968022e-01 8.4912243e-02 -1.7724796e-02 -1.1036953e-01 -1.4079372e-01 -9.1845824e-02 8.8799204e-03 1.0459963e-01 1.4135157e-01 9.8416932e-02 1.0000000e-01 9.8416932e-02 -4.4421521e-03 -1.0459963e-01 -1.4114229e-01 -9.1845824e-02 1.3308925e-02 1.1036953e-01 1.4030621e-01 8.4912243e-02 -2.2123174e-02 -1.1570384e-01 -1.3891640e-01 -7.7643552e-02 3.0850113e-02 1.2058153e-01 1.3697834e-01 7.0068437e-02 -3.9455301e-02 -1.2498333e-01 -1.3449970e-01 -6.2216794e-02 4.7904776e-02 1.2889189e-01 1.3149025e-01 5.4119610e-02 -5.6165193e-02 -1.3229176e-01 -1.2796187e-01 -4.5808841e-02 6.4203952e-02 1.3516954e-01 1.2392848e-01 3.7317285e-02 -7.1989327e-02 -1.3751387e-01 -1.1940600e-01 -2.8678454e-02 7.9490594e-02 1.3931550e-01 1.1441228e-01 1.9926443e-02 -8.6678147e-02 -1.4056731e-01 -1.0896703e-01 -1.1095792e-02 9.3523621e-02 1.4126436e-01 1.0309173e-01 2.2213501e-03 -1.0000000e-01 -1.4140391e-01 -9.6809580e-02 6.6618581e-03 1.0608172e-01 1.4098540e-01 9.0145365e-02 -1.5518775e-02 -1.1174479e-01 -1.4001049e-01 -8.3125388e-02 2.4314447e-02 1.1696686e-01 1.3848302e-01 7.5777352e-02 -3.3014160e-02 -1.2172730e-01 -1.3640902e-01 -6.8130258e-02 4.1583582e-02 1.2600735e-01 1.3379667e-01 6.0214285e-02 -4.9988892e-02 -1.2979010e-01 -1.3065630e-01 -5.2060674e-02 5.8196919e-02 1.3306063e-01 1.2700028e-01 4.3701602e-02 -6.6175269e-02 -1.3580604e-01 -1.2284305e-01 -3.5170061e-02 7.3892456e-02 1.3801547e-01 1.1820101e-01 2.6499720e-02 -8.1318023e-02 -1.3968022e-01 -1.1309249e-01 -1.7724796e-02 8.8422664e-02 1.4079372e-01 1.0753764e-01 8.8799204e-03 -9.5178342e-02 -1.4135157e-01 -1.0155839e-01 1.0000000e-01 9.5178342e-02 -1.3308925e-02 -1.1309249e-01 -1.3891640e-01 -7.3892456e-02 3.9455301e-02 1.2700028e-01 1.3149025e-01 4.9988892e-02 -6.4203952e-02 -1.3640902e-01 -1.1940600e-01 -2.4314447e-02 8.6678147e-02 1.4098540e-01 1.0309173e-01 -2.2213501e-03 -1.0608172e-01 -1.4056731e-01 -8.3125388e-02 2.8678454e-02 1.2172730e-01 1.3516954e-01 6.0214285e-02 -5.4119610e-02 -1.3306063e-01 -1.2498333e-01 -3.5170061e-02 7.7643552e-02 1.3968022e-01 1.1036953e-01 8.8799204e-03 -9.8416932e-02 -1.4135157e-01 -9.1845824e-02 1.7724796e-02 1.1570384e-01 1.3801547e-01 7.0068437e-02 -4.3701602e-02 -1.2889189e-01 -1.2979010e-01 -4.5808841e-02 6.8130258e-02 1.3751387e-01 1.1696686e-01 1.9926443e-02 -9.0145365e-02 -1.4126436e-01 -1.0000000e-01 6.6618581e-03 1.0896703e-01 1.4001049e-01 7.9490594e-02 -3.3014160e-02 -1.2392848e-01 -1.3379667e-01 -5.6165193e-02 5.8196919e-02 1.3449970e-01 1.2284305e-01 3.0850113e-02 -8.1318023e-02 -1.4030621e-01 -1.0753764e-01 -4.4421521e-03 1.0155839e-01 1.4114229e-01 8.8422664e-02 -2.2123174e-02 -1.1820101e-01 -1.3697834e-01 -6.6175269e-02 4.7904776e-02 1.3065630e-01 1.2796187e-01 4.1583582e-02 -7.1989327e-02 -1.3848302e-01 -1.1441228e-01 -1.5518775e-02 9.3523621e-02 1.4140391e-01 9.6809580e-02 -1.1095792e-02 -1.1174479e-01 -1.3931550e-01 -7.5777352e-02 3.7317285e-02 1.2600735e-01 1.3229176e-01 5.2060674e-02 -6.2216794e-02 -1.3580604e-01 -1.2058153e-01 -2.6499720e-02 8.4912243e-02 1.4079372e-01 1.0459963e-01 1.0000000e-01 9.1845824e-02 -2.2123174e-02 -1.2058153e-01 -1.3449970e-01 -5.4119610e-02 6.4203952e-02 1.3751387e-01 1.1441228e-01 1.1095792e-02 -1.0000000e-01 -1.4098540e-01 -8.3125388e-02 3.3014160e-02 1.2600735e-01 1.3065630e-01 4.3701602e-02 -7.3892456e-02 -1.3968022e-01 -1.0753764e-01 1.5595963e-16 
1.0753764e-01 1.3968022e-01 7.3892456e-02 -4.3701602e-02 -1.3065630e-01 -1.2600735e-01 -3.3014160e-02 8.3125388e-02 1.4098540e-01 1.0000000e-01 -1.1095792e-02 -1.1441228e-01 -1.3751387e-01 -6.4203952e-02 5.4119610e-02 1.3449970e-01 1.2058153e-01 2.2123174e-02 -9.1845824e-02 -1.4142136e-01 -9.1845824e-02 2.2123174e-02 1.2058153e-01 1.3449970e-01 5.4119610e-02 -6.4203952e-02 -1.3751387e-01 -1.1441228e-01 -1.1095792e-02 1.0000000e-01 1.4098540e-01 8.3125388e-02 -3.3014160e-02 -1.2600735e-01 -1.3065630e-01 -4.3701602e-02 7.3892456e-02 1.3968022e-01 1.0753764e-01 3.4550705e-17 -1.0753764e-01 -1.3968022e-01 -7.3892456e-02 4.3701602e-02 1.3065630e-01 1.2600735e-01 3.3014160e-02 -8.3125388e-02 -1.4098540e-01 -1.0000000e-01 1.1095792e-02 1.1441228e-01 1.3751387e-01 6.4203952e-02 -5.4119610e-02 -1.3449970e-01 -1.2058153e-01 -2.2123174e-02 9.1845824e-02 1.4142136e-01 9.1845824e-02 -2.2123174e-02 -1.2058153e-01 -1.3449970e-01 -5.4119610e-02 6.4203952e-02 1.3751387e-01 1.1441228e-01 1.1095792e-02 -1.0000000e-01 -1.4098540e-01 -8.3125388e-02 3.3014160e-02 1.2600735e-01 1.3065630e-01 4.3701602e-02 -7.3892456e-02 -1.3968022e-01 -1.0753764e-01 1.0000000e-01 8.8422664e-02 -3.0850113e-02 -1.2700028e-01 -1.2796187e-01 -3.3014160e-02 8.6678147e-02 1.4140391e-01 9.0145365e-02 -2.8678454e-02 -1.2600735e-01 -1.2889189e-01 -3.5170061e-02 8.4912243e-02 1.4135157e-01 9.1845824e-02 -2.6499720e-02 -1.2498333e-01 -1.2979010e-01 -3.7317285e-02 8.3125388e-02 1.4126436e-01 9.3523621e-02 -2.4314447e-02 -1.2392848e-01 -1.3065630e-01 -3.9455301e-02 8.1318023e-02 1.4114229e-01 9.5178342e-02 -2.2123174e-02 -1.2284305e-01 -1.3149025e-01 -4.1583582e-02 7.9490594e-02 1.4098540e-01 9.6809580e-02 -1.9926443e-02 -1.2172730e-01 -1.3229176e-01 -4.3701602e-02 7.7643552e-02 1.4079372e-01 9.8416932e-02 -1.7724796e-02 -1.2058153e-01 -1.3306063e-01 -4.5808841e-02 7.5777352e-02 1.4056731e-01 1.0000000e-01 -1.5518775e-02 -1.1940600e-01 -1.3379667e-01 -4.7904776e-02 7.3892456e-02 1.4030621e-01 1.0155839e-01 -1.3308925e-02 -1.1820101e-01 -1.3449970e-01 -4.9988892e-02 7.1989327e-02 1.4001049e-01 1.0309173e-01 -1.1095792e-02 -1.1696686e-01 -1.3516954e-01 -5.2060674e-02 7.0068437e-02 1.3968022e-01 1.0459963e-01 -8.8799204e-03 -1.1570384e-01 -1.3580604e-01 -5.4119610e-02 6.8130258e-02 1.3931550e-01 1.0608172e-01 -6.6618581e-03 -1.1441228e-01 -1.3640902e-01 -5.6165193e-02 6.6175269e-02 1.3891640e-01 1.0753764e-01 -4.4421521e-03 -1.1309249e-01 -1.3697834e-01 -5.8196919e-02 6.4203952e-02 1.3848302e-01 1.0896703e-01 -2.2213501e-03 -1.1174479e-01 -1.3751387e-01 -6.0214285e-02 6.2216794e-02 1.3801547e-01 1.1036953e-01 1.0000000e-01 8.4912243e-02 -3.9455301e-02 -1.3229176e-01 -1.1940600e-01 -1.1095792e-02 1.0608172e-01 1.3848302e-01 6.0214285e-02 -6.6175269e-02 -1.3968022e-01 -1.0155839e-01 1.7724796e-02 1.2284305e-01 1.2979010e-01 3.3014160e-02 -9.0145365e-02 -1.4126436e-01 -7.9490594e-02 4.5808841e-02 1.3449970e-01 1.1570384e-01 4.4421521e-03 -1.1036953e-01 -1.3697834e-01 -5.4119610e-02 7.1989327e-02 1.4056731e-01 9.6809580e-02 -2.4314447e-02 -1.2600735e-01 -1.2700028e-01 -2.6499720e-02 9.5178342e-02 1.4079372e-01 7.3892456e-02 -5.2060674e-02 -1.3640902e-01 -1.1174479e-01 2.2213501e-03 1.1441228e-01 1.3516954e-01 4.7904776e-02 -7.7643552e-02 -1.4114229e-01 -9.1845824e-02 3.0850113e-02 1.2889189e-01 1.2392848e-01 1.9926443e-02 -1.0000000e-01 -1.4001049e-01 -6.8130258e-02 5.8196919e-02 1.3801547e-01 1.0753764e-01 -8.8799204e-03 -1.1820101e-01 -1.3306063e-01 -4.1583582e-02 8.3125388e-02 1.4140391e-01 8.6678147e-02 -3.7317285e-02 -1.3149025e-01 
-1.2058153e-01 -1.3308925e-02 1.0459963e-01 1.3891640e-01 6.2216794e-02 -6.4203952e-02 -1.3931550e-01 -1.0309173e-01 1.5518775e-02 1.2172730e-01 1.3065630e-01 3.5170061e-02 -8.8422664e-02 -1.4135157e-01 -8.1318023e-02 4.3701602e-02 1.3379667e-01 1.1696686e-01 6.6618581e-03 -1.0896703e-01 -1.3751387e-01 -5.6165193e-02 7.0068437e-02 1.4030621e-01 9.8416932e-02 -2.2123174e-02 -1.2498333e-01 -1.2796187e-01 -2.8678454e-02 9.3523621e-02 1.4098540e-01 7.5777352e-02 -4.9988892e-02 -1.3580604e-01 -1.1309249e-01 1.0000000e-01 8.1318023e-02 -4.7904776e-02 -1.3640902e-01 -1.0896703e-01 1.1095792e-02 1.2172730e-01 1.2889189e-01 2.6499720e-02 -9.8416932e-02 -1.3968022e-01 -6.2216794e-02 6.8130258e-02 1.4056731e-01 9.3523621e-02 -3.3014160e-02 -1.3149025e-01 -1.1820101e-01 -4.4421521e-03 1.1309249e-01 1.3449970e-01 4.1583582e-02 -8.6678147e-02 -1.4126436e-01 -7.5777352e-02 5.4119610e-02 1.3801547e-01 1.0459963e-01 -1.7724796e-02 -1.2498333e-01 -1.2600735e-01 -1.9926443e-02 1.0309173e-01 1.3848302e-01 5.6165193e-02 -7.3892456e-02 -1.4114229e-01 -8.8422664e-02 3.9455301e-02 1.3379667e-01 1.1441228e-01 -2.2213501e-03 -1.1696686e-01 -1.3229176e-01 -3.5170061e-02 9.1845824e-02 1.4079372e-01 7.0068437e-02 -6.0214285e-02 -1.3931550e-01 -1.0000000e-01 2.4314447e-02 1.2796187e-01 1.2284305e-01 1.3308925e-02 -1.0753764e-01 -1.3697834e-01 -4.9988892e-02 7.9490594e-02 1.4140391e-01 8.3125388e-02 -4.5808841e-02 -1.3580604e-01 -1.1036953e-01 8.8799204e-03 1.2058153e-01 1.2979010e-01 2.8678454e-02 -9.6809580e-02 -1.4001049e-01 -6.4203952e-02 6.6175269e-02 1.4030621e-01 9.5178342e-02 -3.0850113e-02 -1.3065630e-01 -1.1940600e-01 -6.6618581e-03 1.1174479e-01 1.3516954e-01 4.3701602e-02 -8.4912243e-02 -1.4135157e-01 -7.7643552e-02 5.2060674e-02 1.3751387e-01 1.0608172e-01 -1.5518775e-02 -1.2392848e-01 -1.2700028e-01 -2.2123174e-02 1.0155839e-01 1.3891640e-01 5.8196919e-02 -7.1989327e-02 -1.4098540e-01 -9.0145365e-02 3.7317285e-02 1.3306063e-01 1.1570384e-01 1.0000000e-01 7.7643552e-02 -5.6165193e-02 -1.3931550e-01 -9.6809580e-02 3.3014160e-02 1.3306063e-01 1.1309249e-01 -8.8799204e-03 -1.2284305e-01 -1.2600735e-01 -1.5518775e-02 1.0896703e-01 1.3516954e-01 3.9455301e-02 -9.1845824e-02 -1.4030621e-01 -6.2216794e-02 7.1989327e-02 1.4126436e-01 8.3125388e-02 -4.9988892e-02 -1.3801547e-01 -1.0155839e-01 2.6499720e-02 1.3065630e-01 1.1696686e-01 -2.2213501e-03 -1.1940600e-01 -1.2889189e-01 -2.2123174e-02 1.0459963e-01 1.3697834e-01 4.5808841e-02 -8.6678147e-02 -1.4098540e-01 -6.8130258e-02 6.6175269e-02 1.4079372e-01 8.8422664e-02 -4.3701602e-02 -1.3640902e-01 -1.0608172e-01 1.9926443e-02 1.2796187e-01 1.2058153e-01 4.4421521e-03 -1.1570384e-01 -1.3149025e-01 -2.8678454e-02 1.0000000e-01 1.3848302e-01 5.2060674e-02 -8.1318023e-02 -1.4135157e-01 -7.3892456e-02 6.0214285e-02 1.4001049e-01 9.3523621e-02 -3.7317285e-02 -1.3449970e-01 -1.1036953e-01 1.3308925e-02 1.2498333e-01 1.2392848e-01 1.1095792e-02 -1.1174479e-01 -1.3379667e-01 -3.5170061e-02 9.5178342e-02 1.3968022e-01 5.8196919e-02 -7.5777352e-02 -1.4140391e-01 -7.9490594e-02 5.4119610e-02 1.3891640e-01 9.8416932e-02 -3.0850113e-02 -1.3229176e-01 -1.1441228e-01 6.6618581e-03 1.2172730e-01 1.2700028e-01 1.7724796e-02 -1.0753764e-01 -1.3580604e-01 -4.1583582e-02 9.0145365e-02 1.4056731e-01 6.4203952e-02 -7.0068437e-02 -1.4114229e-01 -8.4912243e-02 4.7904776e-02 1.3751387e-01 1.0309173e-01 -2.4314447e-02 -1.2979010e-01 -1.1820101e-01 1.0000000e-01 7.3892456e-02 -6.4203952e-02 -1.4098540e-01 -8.3125388e-02 5.4119610e-02 1.3968022e-01 9.1845824e-02 -4.3701602e-02 -1.3751387e-01 
-1.0000000e-01 3.3014160e-02 1.3449970e-01 1.0753764e-01 -2.2123174e-02 -1.3065630e-01 -1.1441228e-01 1.1095792e-02 1.2600735e-01 1.2058153e-01 3.6378908e-16 -1.2058153e-01 -1.2600735e-01 -1.1095792e-02 1.1441228e-01 1.3065630e-01 2.2123174e-02 -1.0753764e-01 -1.3449970e-01 -3.3014160e-02 1.0000000e-01 1.3751387e-01 4.3701602e-02 -9.1845824e-02 -1.3968022e-01 -5.4119610e-02 8.3125388e-02 1.4098540e-01 6.4203952e-02 -7.3892456e-02 -1.4142136e-01 -7.3892456e-02 6.4203952e-02 1.4098540e-01 8.3125388e-02 -5.4119610e-02 -1.3968022e-01 -9.1845824e-02 4.3701602e-02 1.3751387e-01 1.0000000e-01 -3.3014160e-02 -1.3449970e-01 -1.0753764e-01 2.2123174e-02 1.3065630e-01 1.1441228e-01 -1.1095792e-02 -1.2600735e-01 -1.2058153e-01 -1.5937968e-15 1.2058153e-01 1.2600735e-01 1.1095792e-02 -1.1441228e-01 -1.3065630e-01 -2.2123174e-02 1.0753764e-01 1.3449970e-01 3.3014160e-02 -1.0000000e-01 -1.3751387e-01 -4.3701602e-02 9.1845824e-02 1.3968022e-01 5.4119610e-02 -8.3125388e-02 -1.4098540e-01 -6.4203952e-02 7.3892456e-02 1.4142136e-01 7.3892456e-02 -6.4203952e-02 -1.4098540e-01 -8.3125388e-02 5.4119610e-02 1.3968022e-01 9.1845824e-02 -4.3701602e-02 -1.3751387e-01 -1.0000000e-01 3.3014160e-02 1.3449970e-01 1.0753764e-01 -2.2123174e-02 -1.3065630e-01 -1.1441228e-01 1.1095792e-02 1.2600735e-01 1.2058153e-01 1.0000000e-01 7.0068437e-02 -7.1989327e-02 -1.4140391e-01 -6.8130258e-02 7.3892456e-02 1.4135157e-01 6.6175269e-02 -7.5777352e-02 -1.4126436e-01 -6.4203952e-02 7.7643552e-02 1.4114229e-01 6.2216794e-02 -7.9490594e-02 -1.4098540e-01 -6.0214285e-02 8.1318023e-02 1.4079372e-01 5.8196919e-02 -8.3125388e-02 -1.4056731e-01 -5.6165193e-02 8.4912243e-02 1.4030621e-01 5.4119610e-02 -8.6678147e-02 -1.4001049e-01 -5.2060674e-02 8.8422664e-02 1.3968022e-01 4.9988892e-02 -9.0145365e-02 -1.3931550e-01 -4.7904776e-02 9.1845824e-02 1.3891640e-01 4.5808841e-02 -9.3523621e-02 -1.3848302e-01 -4.3701602e-02 9.5178342e-02 1.3801547e-01 4.1583582e-02 -9.6809580e-02 -1.3751387e-01 -3.9455301e-02 9.8416932e-02 1.3697834e-01 3.7317285e-02 -1.0000000e-01 -1.3640902e-01 -3.5170061e-02 1.0155839e-01 1.3580604e-01 3.3014160e-02 -1.0309173e-01 -1.3516954e-01 -3.0850113e-02 1.0459963e-01 1.3449970e-01 2.8678454e-02 -1.0608172e-01 -1.3379667e-01 -2.6499720e-02 1.0753764e-01 1.3306063e-01 2.4314447e-02 -1.0896703e-01 -1.3229176e-01 -2.2123174e-02 1.1036953e-01 1.3149025e-01 1.9926443e-02 -1.1174479e-01 -1.3065630e-01 -1.7724796e-02 1.1309249e-01 1.2979010e-01 1.5518775e-02 -1.1441228e-01 -1.2889189e-01 -1.3308925e-02 1.1570384e-01 1.2796187e-01 1.1095792e-02 -1.1696686e-01 -1.2700028e-01 -8.8799204e-03 1.1820101e-01 1.2600735e-01 6.6618581e-03 -1.1940600e-01 -1.2498333e-01 -4.4421521e-03 1.2058153e-01 1.2392848e-01 2.2213501e-03 -1.2172730e-01 -1.2284305e-01 1.0000000e-01 6.6175269e-02 -7.9490594e-02 -1.4056731e-01 -5.2060674e-02 9.1845824e-02 1.3801547e-01 3.7317285e-02 -1.0309173e-01 -1.3379667e-01 -2.2123174e-02 1.1309249e-01 1.2796187e-01 6.6618581e-03 -1.2172730e-01 -1.2058153e-01 8.8799204e-03 1.2889189e-01 1.1174479e-01 -2.4314447e-02 -1.3449970e-01 -1.0155839e-01 3.9455301e-02 1.3848302e-01 9.0145365e-02 -5.4119610e-02 -1.4079372e-01 -7.7643552e-02 6.8130258e-02 1.4140391e-01 6.4203952e-02 -8.1318023e-02 -1.4030621e-01 -4.9988892e-02 9.3523621e-02 1.3751387e-01 3.5170061e-02 -1.0459963e-01 -1.3306063e-01 -1.9926443e-02 1.1441228e-01 1.2700028e-01 4.4421521e-03 -1.2284305e-01 -1.1940600e-01 1.1095792e-02 1.2979010e-01 1.1036953e-01 -2.6499720e-02 -1.3516954e-01 -1.0000000e-01 4.1583582e-02 1.3891640e-01 8.8422664e-02 -5.6165193e-02 
-1.4098540e-01 -7.5777352e-02 7.0068437e-02 1.4135157e-01 6.2216794e-02 -8.3125388e-02 -1.4001049e-01 -4.7904776e-02 9.5178342e-02 1.3697834e-01 3.3014160e-02 -1.0608172e-01 -1.3229176e-01 -1.7724796e-02 1.1570384e-01 1.2600735e-01 2.2213501e-03 -1.2392848e-01 -1.1820101e-01 1.3308925e-02 1.3065630e-01 1.0896703e-01 -2.8678454e-02 -1.3580604e-01 -9.8416932e-02 4.3701602e-02 1.3931550e-01 8.6678147e-02 -5.8196919e-02 -1.4114229e-01 -7.3892456e-02 7.1989327e-02 1.4126436e-01 6.0214285e-02 -8.4912243e-02 -1.3968022e-01 -4.5808841e-02 9.6809580e-02 1.3640902e-01 3.0850113e-02 -1.0753764e-01 -1.3149025e-01 -1.5518775e-02 1.1696686e-01 1.2498333e-01 1.0000000e-01 6.2216794e-02 -8.6678147e-02 -1.3848302e-01 -3.5170061e-02 1.0753764e-01 1.2979010e-01 6.6618581e-03 -1.2392848e-01 -1.1570384e-01 2.2123174e-02 1.3516954e-01 9.6809580e-02 -4.9988892e-02 -1.4079372e-01 -7.3892456e-02 7.5777352e-02 1.4056731e-01 4.7904776e-02 -9.8416932e-02 -1.3449970e-01 -1.9926443e-02 1.1696686e-01 1.2284305e-01 -8.8799204e-03 -1.3065630e-01 -1.0608172e-01 3.7317285e-02 1.3891640e-01 8.4912243e-02 -6.4203952e-02 -1.4140391e-01 -6.0214285e-02 8.8422664e-02 1.3801547e-01 3.3014160e-02 -1.0896703e-01 -1.2889189e-01 -4.4421521e-03 1.2498333e-01 1.1441228e-01 -2.4314447e-02 -1.3580604e-01 -9.5178342e-02 5.2060674e-02 1.4098540e-01 7.1989327e-02 -7.7643552e-02 -1.4030621e-01 -4.5808841e-02 1.0000000e-01 1.3379667e-01 1.7724796e-02 -1.1820101e-01 -1.2172730e-01 1.1095792e-02 1.3149025e-01 1.0459963e-01 -3.9455301e-02 -1.3931550e-01 -8.3125388e-02 6.6175269e-02 1.4135157e-01 5.8196919e-02 -9.0145365e-02 -1.3751387e-01 -3.0850113e-02 1.1036953e-01 1.2796187e-01 2.2213501e-03 -1.2600735e-01 -1.1309249e-01 2.6499720e-02 1.3640902e-01 9.3523621e-02 -5.4119610e-02 -1.4114229e-01 -7.0068437e-02 7.9490594e-02 1.4001049e-01 4.3701602e-02 -1.0155839e-01 -1.3306063e-01 -1.5518775e-02 1.1940600e-01 1.2058153e-01 -1.3308925e-02 -1.3229176e-01 -1.0309173e-01 4.1583582e-02 1.3968022e-01 8.1318023e-02 -6.8130258e-02 -1.4126436e-01 -5.6165193e-02 9.1845824e-02 1.3697834e-01 2.8678454e-02 -1.1174479e-01 -1.2700028e-01 1.0000000e-01 5.8196919e-02 -9.3523621e-02 -1.3516954e-01 -1.7724796e-02 1.2058153e-01 1.1696686e-01 -2.4314447e-02 -1.3697834e-01 -8.8422664e-02 6.4203952e-02 1.4126436e-01 5.2060674e-02 -9.8416932e-02 -1.3306063e-01 -1.1095792e-02 1.2392848e-01 1.1309249e-01 -3.0850113e-02 -1.3848302e-01 -8.3125388e-02 7.0068437e-02 1.4079372e-01 4.5808841e-02 -1.0309173e-01 -1.3065630e-01 -4.4421521e-03 1.2700028e-01 1.0896703e-01 -3.7317285e-02 -1.3968022e-01 -7.7643552e-02 7.5777352e-02 1.4001049e-01 3.9455301e-02 -1.0753764e-01 -1.2796187e-01 2.2213501e-03 1.2979010e-01 1.0459963e-01 -4.3701602e-02 -1.4056731e-01 -7.1989327e-02 8.1318023e-02 1.3891640e-01 3.3014160e-02 -1.1174479e-01 -1.2498333e-01 8.8799204e-03 1.3229176e-01 1.0000000e-01 -4.9988892e-02 -1.4114229e-01 -6.6175269e-02 8.6678147e-02 1.3751387e-01 2.6499720e-02 -1.1570384e-01 -1.2172730e-01 1.5518775e-02 1.3449970e-01 9.5178342e-02 -5.6165193e-02 -1.4140391e-01 -6.0214285e-02 9.1845824e-02 1.3580604e-01 1.9926443e-02 -1.1940600e-01 -1.1820101e-01 2.2123174e-02 1.3640902e-01 9.0145365e-02 -6.2216794e-02 -1.4135157e-01 -5.4119610e-02 9.6809580e-02 1.3379667e-01 1.3308925e-02 -1.2284305e-01 -1.1441228e-01 2.8678454e-02 1.3801547e-01 8.4912243e-02 -6.8130258e-02 -1.4098540e-01 -4.7904776e-02 1.0155839e-01 1.3149025e-01 6.6618581e-03 -1.2600735e-01 -1.1036953e-01 3.5170061e-02 1.3931550e-01 7.9490594e-02 -7.3892456e-02 -1.4030621e-01 -4.1583582e-02 1.0608172e-01 1.2889189e-01 
1.0000000e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -2.5978682e-17 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 7.7936045e-17 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -3.8110820e-16 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 -6.9364022e-17 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 5.1983624e-16 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 1.0394099e-15 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 4.1592152e-16 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 2.1481838e-15 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 -6.9285238e-16 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 1.2472393e-15 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 2.0809206e-16 1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.4142136e-01 -5.4119610e-02 1.0000000e-01 1.3065630e-01 2.3560132e-15 -1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.4142136e-01 5.4119610e-02 -1.0000000e-01 -1.3065630e-01 1.0000000e-01 4.9988892e-02 -1.0608172e-01 -1.2498333e-01 1.7724796e-02 1.3751387e-01 7.9490594e-02 -8.1318023e-02 -1.3697834e-01 -1.5518775e-02 1.2600735e-01 1.0459963e-01 -5.2060674e-02 -1.4140391e-01 -4.7904776e-02 1.0753764e-01 1.2392848e-01 -1.9926443e-02 -1.3801547e-01 -7.7643552e-02 8.3125388e-02 1.3640902e-01 1.3308925e-02 -1.2700028e-01 -1.0309173e-01 5.4119610e-02 1.4135157e-01 4.5808841e-02 -1.0896703e-01 -1.2284305e-01 2.2123174e-02 1.3848302e-01 7.5777352e-02 -8.4912243e-02 -1.3580604e-01 -1.1095792e-02 1.2796187e-01 1.0155839e-01 -5.6165193e-02 -1.4126436e-01 -4.3701602e-02 1.1036953e-01 1.2172730e-01 -2.4314447e-02 -1.3891640e-01 -7.3892456e-02 8.6678147e-02 1.3516954e-01 8.8799204e-03 -1.2889189e-01 -1.0000000e-01 5.8196919e-02 1.4114229e-01 4.1583582e-02 -1.1174479e-01 -1.2058153e-01 2.6499720e-02 1.3931550e-01 7.1989327e-02 -8.8422664e-02 -1.3449970e-01 -6.6618581e-03 1.2979010e-01 9.8416932e-02 -6.0214285e-02 -1.4098540e-01 -3.9455301e-02 1.1309249e-01 1.1940600e-01 -2.8678454e-02 -1.3968022e-01 -7.0068437e-02 9.0145365e-02 1.3379667e-01 4.4421521e-03 -1.3065630e-01 -9.6809580e-02 6.2216794e-02 1.4079372e-01 3.7317285e-02 -1.1441228e-01 -1.1820101e-01 3.0850113e-02 1.4001049e-01 6.8130258e-02 -9.1845824e-02 -1.3306063e-01 -2.2213501e-03 1.3149025e-01 9.5178342e-02 -6.4203952e-02 -1.4056731e-01 -3.5170061e-02 1.1570384e-01 1.1696686e-01 -3.3014160e-02 -1.4030621e-01 -6.6175269e-02 9.3523621e-02 1.3229176e-01 1.0000000e-01 4.5808841e-02 -1.1174479e-01 -1.1820101e-01 3.5170061e-02 1.4098540e-01 5.6165193e-02 -1.0459963e-01 -1.2392848e-01 2.4314447e-02 1.3968022e-01 6.6175269e-02 -9.6809580e-02 -1.2889189e-01 1.3308925e-02 1.3751387e-01 7.5777352e-02 -8.8422664e-02 -1.3306063e-01 2.2213501e-03 1.3449970e-01 8.4912243e-02 -7.9490594e-02 -1.3640902e-01 -8.8799204e-03 1.3065630e-01 9.3523621e-02 -7.0068437e-02 -1.3891640e-01 -1.9926443e-02 1.2600735e-01 1.0155839e-01 -6.0214285e-02 -1.4056731e-01 -3.0850113e-02 1.2058153e-01 1.0896703e-01 -4.9988892e-02 -1.4135157e-01 -4.1583582e-02 1.1441228e-01 1.1570384e-01 -3.9455301e-02 -1.4126436e-01 -5.2060674e-02 
1.0753764e-01 1.2172730e-01 -2.8678454e-02 -1.4030621e-01 -6.2216794e-02 1.0000000e-01 1.2700028e-01 -1.7724796e-02 -1.3848302e-01 -7.1989327e-02 9.1845824e-02 1.3149025e-01 -6.6618581e-03 -1.3580604e-01 -8.1318023e-02 8.3125388e-02 1.3516954e-01 4.4421521e-03 -1.3229176e-01 -9.0145365e-02 7.3892456e-02 1.3801547e-01 1.5518775e-02 -1.2796187e-01 -9.8416932e-02 6.4203952e-02 1.4001049e-01 2.6499720e-02 -1.2284305e-01 -1.0608172e-01 5.4119610e-02 1.4114229e-01 3.7317285e-02 -1.1696686e-01 -1.1309249e-01 4.3701602e-02 1.4140391e-01 4.7904776e-02 -1.1036953e-01 -1.1940600e-01 3.3014160e-02 1.4079372e-01 5.8196919e-02 -1.0309173e-01 -1.2498333e-01 2.2123174e-02 1.3931550e-01 6.8130258e-02 -9.5178342e-02 -1.2979010e-01 1.1095792e-02 1.3697834e-01 7.7643552e-02 -8.6678147e-02 -1.3379667e-01 1.0000000e-01 4.1583582e-02 -1.1696686e-01 -1.1036953e-01 5.2060674e-02 1.4098540e-01 3.0850113e-02 -1.2284305e-01 -1.0309173e-01 6.2216794e-02 1.3968022e-01 1.9926443e-02 -1.2796187e-01 -9.5178342e-02 7.1989327e-02 1.3751387e-01 8.8799204e-03 -1.3229176e-01 -8.6678147e-02 8.1318023e-02 1.3449970e-01 -2.2213501e-03 -1.3580604e-01 -7.7643552e-02 9.0145365e-02 1.3065630e-01 -1.3308925e-02 -1.3848302e-01 -6.8130258e-02 9.8416932e-02 1.2600735e-01 -2.4314447e-02 -1.4030621e-01 -5.8196919e-02 1.0608172e-01 1.2058153e-01 -3.5170061e-02 -1.4126436e-01 -4.7904776e-02 1.1309249e-01 1.1441228e-01 -4.5808841e-02 -1.4135157e-01 -3.7317285e-02 1.1940600e-01 1.0753764e-01 -5.6165193e-02 -1.4056731e-01 -2.6499720e-02 1.2498333e-01 1.0000000e-01 -6.6175269e-02 -1.3891640e-01 -1.5518775e-02 1.2979010e-01 9.1845824e-02 -7.5777352e-02 -1.3640902e-01 -4.4421521e-03 1.3379667e-01 8.3125388e-02 -8.4912243e-02 -1.3306063e-01 6.6618581e-03 1.3697834e-01 7.3892456e-02 -9.3523621e-02 -1.2889189e-01 1.7724796e-02 1.3931550e-01 6.4203952e-02 -1.0155839e-01 -1.2392848e-01 2.8678454e-02 1.4079372e-01 5.4119610e-02 -1.0896703e-01 -1.1820101e-01 3.9455301e-02 1.4140391e-01 4.3701602e-02 -1.1570384e-01 -1.1174479e-01 4.9988892e-02 1.4114229e-01 3.3014160e-02 -1.2172730e-01 -1.0459963e-01 6.0214285e-02 1.4001049e-01 2.2123174e-02 -1.2700028e-01 -9.6809580e-02 7.0068437e-02 1.3801547e-01 1.1095792e-02 -1.3149025e-01 -8.8422664e-02 7.9490594e-02 1.3516954e-01 1.0000000e-01 3.7317285e-02 -1.2172730e-01 -1.0155839e-01 6.8130258e-02 1.3751387e-01 4.4421521e-03 -1.3516954e-01 -7.5777352e-02 9.5178342e-02 1.2600735e-01 -2.8678454e-02 -1.4114229e-01 -4.5808841e-02 1.1696686e-01 1.0753764e-01 -6.0214285e-02 -1.3931550e-01 -1.3308925e-02 1.3229176e-01 8.3125388e-02 -8.8422664e-02 -1.2979010e-01 1.9926443e-02 1.4030621e-01 5.4119610e-02 -1.1174479e-01 -1.1309249e-01 5.2060674e-02 1.4056731e-01 2.2123174e-02 -1.2889189e-01 -9.0145365e-02 8.1318023e-02 1.3306063e-01 -1.1095792e-02 -1.3891640e-01 -6.2216794e-02 1.0608172e-01 1.1820101e-01 -4.3701602e-02 -1.4126436e-01 -3.0850113e-02 1.2498333e-01 9.6809580e-02 -7.3892456e-02 -1.3580604e-01 2.2213501e-03 1.3697834e-01 7.0068437e-02 -1.0000000e-01 -1.2284305e-01 3.5170061e-02 1.4140391e-01 3.9455301e-02 -1.2058153e-01 -1.0309173e-01 6.6175269e-02 1.3801547e-01 6.6618581e-03 -1.3449970e-01 -7.7643552e-02 9.3523621e-02 1.2700028e-01 -2.6499720e-02 -1.4098540e-01 -4.7904776e-02 1.1570384e-01 1.0896703e-01 -5.8196919e-02 -1.3968022e-01 -1.5518775e-02 1.3149025e-01 8.4912243e-02 -8.6678147e-02 -1.3065630e-01 1.7724796e-02 1.4001049e-01 5.6165193e-02 -1.1036953e-01 -1.1441228e-01 4.9988892e-02 1.4079372e-01 2.4314447e-02 -1.2796187e-01 -9.1845824e-02 7.9490594e-02 1.3379667e-01 -8.8799204e-03 -1.3848302e-01 
-6.4203952e-02 1.0459963e-01 1.1940600e-01 -4.1583582e-02 -1.4135157e-01 -3.3014160e-02 1.2392848e-01 9.8416932e-02 -7.1989327e-02 -1.3640902e-01 1.0000000e-01 3.3014160e-02 -1.2600735e-01 -9.1845824e-02 8.3125388e-02 1.3065630e-01 -2.2123174e-02 -1.4098540e-01 -4.3701602e-02 1.2058153e-01 1.0000000e-01 -7.3892456e-02 -1.3449970e-01 1.1095792e-02 1.3968022e-01 5.4119610e-02 -1.1441228e-01 -1.0753764e-01 6.4203952e-02 1.3751387e-01 3.9842732e-16 -1.3751387e-01 -6.4203952e-02 1.0753764e-01 1.1441228e-01 -5.4119610e-02 -1.3968022e-01 -1.1095792e-02 1.3449970e-01 7.3892456e-02 -1.0000000e-01 -1.2058153e-01 4.3701602e-02 1.4098540e-01 2.2123174e-02 -1.3065630e-01 -8.3125388e-02 9.1845824e-02 1.2600735e-01 -3.3014160e-02 -1.4142136e-01 -3.3014160e-02 1.2600735e-01 9.1845824e-02 -8.3125388e-02 -1.3065630e-01 2.2123174e-02 1.4098540e-01 4.3701602e-02 -1.2058153e-01 -1.0000000e-01 7.3892456e-02 1.3449970e-01 -1.1095792e-02 -1.3968022e-01 -5.4119610e-02 1.1441228e-01 1.0753764e-01 -6.4203952e-02 -1.3751387e-01 -6.9285238e-16 1.3751387e-01 6.4203952e-02 -1.0753764e-01 -1.1441228e-01 5.4119610e-02 1.3968022e-01 1.1095792e-02 -1.3449970e-01 -7.3892456e-02 1.0000000e-01 1.2058153e-01 -4.3701602e-02 -1.4098540e-01 -2.2123174e-02 1.3065630e-01 8.3125388e-02 -9.1845824e-02 -1.2600735e-01 3.3014160e-02 1.4142136e-01 3.3014160e-02 -1.2600735e-01 -9.1845824e-02 8.3125388e-02 1.3065630e-01 -2.2123174e-02 -1.4098540e-01 -4.3701602e-02 1.2058153e-01 1.0000000e-01 -7.3892456e-02 -1.3449970e-01 1.1095792e-02 1.3968022e-01 5.4119610e-02 -1.1441228e-01 -1.0753764e-01 6.4203952e-02 1.3751387e-01 1.0000000e-01 2.8678454e-02 -1.2979010e-01 -8.1318023e-02 9.6809580e-02 1.2058153e-01 -4.7904776e-02 -1.4001049e-01 -8.8799204e-03 1.3640902e-01 6.4203952e-02 -1.1036953e-01 -1.0896703e-01 6.6175269e-02 1.3580604e-01 -1.1095792e-02 -1.4030621e-01 -4.5808841e-02 1.2172730e-01 9.5178342e-02 -8.3125388e-02 -1.2889189e-01 3.0850113e-02 1.4140391e-01 2.6499720e-02 -1.3065630e-01 -7.9490594e-02 9.8416932e-02 1.1940600e-01 -4.9988892e-02 -1.3968022e-01 -6.6618581e-03 1.3697834e-01 6.2216794e-02 -1.1174479e-01 -1.0753764e-01 6.8130258e-02 1.3516954e-01 -1.3308925e-02 -1.4056731e-01 -4.3701602e-02 1.2284305e-01 9.3523621e-02 -8.4912243e-02 -1.2796187e-01 3.3014160e-02 1.4135157e-01 2.4314447e-02 -1.3149025e-01 -7.7643552e-02 1.0000000e-01 1.1820101e-01 -5.2060674e-02 -1.3931550e-01 -4.4421521e-03 1.3751387e-01 6.0214285e-02 -1.1309249e-01 -1.0608172e-01 7.0068437e-02 1.3449970e-01 -1.5518775e-02 -1.4079372e-01 -4.1583582e-02 1.2392848e-01 9.1845824e-02 -8.6678147e-02 -1.2700028e-01 3.5170061e-02 1.4126436e-01 2.2123174e-02 -1.3229176e-01 -7.5777352e-02 1.0155839e-01 1.1696686e-01 -5.4119610e-02 -1.3891640e-01 -2.2213501e-03 1.3801547e-01 5.8196919e-02 -1.1441228e-01 -1.0459963e-01 7.1989327e-02 1.3379667e-01 -1.7724796e-02 -1.4098540e-01 -3.9455301e-02 1.2498333e-01 9.0145365e-02 -8.8422664e-02 -1.2600735e-01 3.7317285e-02 1.4114229e-01 1.9926443e-02 -1.3306063e-01 -7.3892456e-02 1.0309173e-01 1.1570384e-01 -5.6165193e-02 -1.3848302e-01 1.0000000e-01 2.4314447e-02 -1.3306063e-01 -7.0068437e-02 1.0896703e-01 1.0753764e-01 -7.1989327e-02 -1.3229176e-01 2.6499720e-02 1.4140391e-01 2.2123174e-02 -1.3379667e-01 -6.8130258e-02 1.1036953e-01 1.0608172e-01 -7.3892456e-02 -1.3149025e-01 2.8678454e-02 1.4135157e-01 1.9926443e-02 -1.3449970e-01 -6.6175269e-02 1.1174479e-01 1.0459963e-01 -7.5777352e-02 -1.3065630e-01 3.0850113e-02 1.4126436e-01 1.7724796e-02 -1.3516954e-01 -6.4203952e-02 1.1309249e-01 1.0309173e-01 -7.7643552e-02 -1.2979010e-01 
3.3014160e-02 1.4114229e-01 1.5518775e-02 -1.3580604e-01 -6.2216794e-02 1.1441228e-01 1.0155839e-01 -7.9490594e-02 -1.2889189e-01 3.5170061e-02 1.4098540e-01 1.3308925e-02 -1.3640902e-01 -6.0214285e-02 1.1570384e-01 1.0000000e-01 -8.1318023e-02 -1.2796187e-01 3.7317285e-02 1.4079372e-01 1.1095792e-02 -1.3697834e-01 -5.8196919e-02 1.1696686e-01 9.8416932e-02 -8.3125388e-02 -1.2700028e-01 3.9455301e-02 1.4056731e-01 8.8799204e-03 -1.3751387e-01 -5.6165193e-02 1.1820101e-01 9.6809580e-02 -8.4912243e-02 -1.2600735e-01 4.1583582e-02 1.4030621e-01 6.6618581e-03 -1.3801547e-01 -5.4119610e-02 1.1940600e-01 9.5178342e-02 -8.6678147e-02 -1.2498333e-01 4.3701602e-02 1.4001049e-01 4.4421521e-03 -1.3848302e-01 -5.2060674e-02 1.2058153e-01 9.3523621e-02 -8.8422664e-02 -1.2392848e-01 4.5808841e-02 1.3968022e-01 2.2213501e-03 -1.3891640e-01 -4.9988892e-02 1.2172730e-01 9.1845824e-02 -9.0145365e-02 -1.2284305e-01 4.7904776e-02 1.3931550e-01 1.0000000e-01 1.9926443e-02 -1.3580604e-01 -5.8196919e-02 1.1940600e-01 9.1845824e-02 -9.3523621e-02 -1.1820101e-01 6.0214285e-02 1.3516954e-01 -2.2123174e-02 -1.4140391e-01 -1.7724796e-02 1.3640902e-01 5.6165193e-02 -1.2058153e-01 -9.0145365e-02 9.5178342e-02 1.1696686e-01 -6.2216794e-02 -1.3449970e-01 2.4314447e-02 1.4135157e-01 1.5518775e-02 -1.3697834e-01 -5.4119610e-02 1.2172730e-01 8.8422664e-02 -9.6809580e-02 -1.1570384e-01 6.4203952e-02 1.3379667e-01 -2.6499720e-02 -1.4126436e-01 -1.3308925e-02 1.3751387e-01 5.2060674e-02 -1.2284305e-01 -8.6678147e-02 9.8416932e-02 1.1441228e-01 -6.6175269e-02 -1.3306063e-01 2.8678454e-02 1.4114229e-01 1.1095792e-02 -1.3801547e-01 -4.9988892e-02 1.2392848e-01 8.4912243e-02 -1.0000000e-01 -1.1309249e-01 6.8130258e-02 1.3229176e-01 -3.0850113e-02 -1.4098540e-01 -8.8799204e-03 1.3848302e-01 4.7904776e-02 -1.2498333e-01 -8.3125388e-02 1.0155839e-01 1.1174479e-01 -7.0068437e-02 -1.3149025e-01 3.3014160e-02 1.4079372e-01 6.6618581e-03 -1.3891640e-01 -4.5808841e-02 1.2600735e-01 8.1318023e-02 -1.0309173e-01 -1.1036953e-01 7.1989327e-02 1.3065630e-01 -3.5170061e-02 -1.4056731e-01 -4.4421521e-03 1.3931550e-01 4.3701602e-02 -1.2700028e-01 -7.9490594e-02 1.0459963e-01 1.0896703e-01 -7.3892456e-02 -1.2979010e-01 3.7317285e-02 1.4030621e-01 2.2213501e-03 -1.3968022e-01 -4.1583582e-02 1.2796187e-01 7.7643552e-02 -1.0608172e-01 -1.0753764e-01 7.5777352e-02 1.2889189e-01 -3.9455301e-02 -1.4001049e-01 1.0000000e-01 1.5518775e-02 -1.3801547e-01 -4.5808841e-02 1.2796187e-01 7.3892456e-02 -1.1174479e-01 -9.8416932e-02 9.0145365e-02 1.1820101e-01 -6.4203952e-02 -1.3229176e-01 3.5170061e-02 1.4001049e-01 -4.4421521e-03 -1.4098540e-01 -2.6499720e-02 1.3516954e-01 5.6165193e-02 -1.2284305e-01 -8.3125388e-02 1.0459963e-01 1.0608172e-01 -8.1318023e-02 -1.2392848e-01 5.4119610e-02 1.3580604e-01 -2.4314447e-02 -1.4114229e-01 -6.6618581e-03 1.3968022e-01 3.7317285e-02 -1.3149025e-01 -6.6175269e-02 1.1696686e-01 9.1845824e-02 -9.6809580e-02 -1.1309249e-01 7.1989327e-02 1.2889189e-01 -4.3701602e-02 -1.3848302e-01 1.3308925e-02 1.4140391e-01 1.7724796e-02 -1.3751387e-01 -4.7904776e-02 1.2700028e-01 7.5777352e-02 -1.1036953e-01 -1.0000000e-01 8.8422664e-02 1.1940600e-01 -6.2216794e-02 -1.3306063e-01 3.3014160e-02 1.4030621e-01 -2.2213501e-03 -1.4079372e-01 -2.8678454e-02 1.3449970e-01 5.8196919e-02 -1.2172730e-01 -8.4912243e-02 1.0309173e-01 1.0753764e-01 -7.9490594e-02 -1.2498333e-01 5.2060674e-02 1.3640902e-01 -2.2123174e-02 -1.4126436e-01 -8.8799204e-03 1.3931550e-01 3.9455301e-02 -1.3065630e-01 -6.8130258e-02 1.1570384e-01 9.3523621e-02 -9.5178342e-02 
-1.1441228e-01 7.0068437e-02 1.2979010e-01 -4.1583582e-02 -1.3891640e-01 1.1095792e-02 1.4135157e-01 1.9926443e-02 -1.3697834e-01 -4.9988892e-02 1.2600735e-01 7.7643552e-02 -1.0896703e-01 -1.0155839e-01 8.6678147e-02 1.2058153e-01 -6.0214285e-02 -1.3379667e-01 3.0850113e-02 1.4056731e-01 1.0000000e-01 1.1095792e-02 -1.3968022e-01 -3.3014160e-02 1.3449970e-01 5.4119610e-02 -1.2600735e-01 -7.3892456e-02 1.1441228e-01 9.1845824e-02 -1.0000000e-01 -1.0753764e-01 8.3125388e-02 1.2058153e-01 -6.4203952e-02 -1.3065630e-01 4.3701602e-02 1.3751387e-01 -2.2123174e-02 -1.4098540e-01 8.6683143e-17 1.4098540e-01 2.2123174e-02 -1.3751387e-01 -4.3701602e-02 1.3065630e-01 6.4203952e-02 -1.2058153e-01 -8.3125388e-02 1.0753764e-01 1.0000000e-01 -9.1845824e-02 -1.1441228e-01 7.3892456e-02 1.2600735e-01 -5.4119610e-02 -1.3449970e-01 3.3014160e-02 1.3968022e-01 -1.1095792e-02 -1.4142136e-01 -1.1095792e-02 1.3968022e-01 3.3014160e-02 -1.3449970e-01 -5.4119610e-02 1.2600735e-01 7.3892456e-02 -1.1441228e-01 -9.1845824e-02 1.0000000e-01 1.0753764e-01 -8.3125388e-02 -1.2058153e-01 6.4203952e-02 1.3065630e-01 -4.3701602e-02 -1.3751387e-01 2.2123174e-02 1.4098540e-01 1.2472393e-15 -1.4098540e-01 -2.2123174e-02 1.3751387e-01 4.3701602e-02 -1.3065630e-01 -6.4203952e-02 1.2058153e-01 8.3125388e-02 -1.0753764e-01 -1.0000000e-01 9.1845824e-02 1.1441228e-01 -7.3892456e-02 -1.2600735e-01 5.4119610e-02 1.3449970e-01 -3.3014160e-02 -1.3968022e-01 1.1095792e-02 1.4142136e-01 1.1095792e-02 -1.3968022e-01 -3.3014160e-02 1.3449970e-01 5.4119610e-02 -1.2600735e-01 -7.3892456e-02 1.1441228e-01 9.1845824e-02 -1.0000000e-01 -1.0753764e-01 8.3125388e-02 1.2058153e-01 -6.4203952e-02 -1.3065630e-01 4.3701602e-02 1.3751387e-01 -2.2123174e-02 -1.4098540e-01 1.0000000e-01 6.6618581e-03 -1.4079372e-01 -1.9926443e-02 1.3891640e-01 3.3014160e-02 -1.3580604e-01 -4.5808841e-02 1.3149025e-01 5.8196919e-02 -1.2600735e-01 -7.0068437e-02 1.1940600e-01 8.1318023e-02 -1.1174479e-01 -9.1845824e-02 1.0309173e-01 1.0155839e-01 -9.3523621e-02 -1.1036953e-01 8.3125388e-02 1.1820101e-01 -7.1989327e-02 -1.2498333e-01 6.0214285e-02 1.3065630e-01 -4.7904776e-02 -1.3516954e-01 3.5170061e-02 1.3848302e-01 -2.2123174e-02 -1.4056731e-01 8.8799204e-03 1.4140391e-01 4.4421521e-03 -1.4098540e-01 -1.7724796e-02 1.3931550e-01 3.0850113e-02 -1.3640902e-01 -4.3701602e-02 1.3229176e-01 5.6165193e-02 -1.2700028e-01 -6.8130258e-02 1.2058153e-01 7.9490594e-02 -1.1309249e-01 -9.0145365e-02 1.0459963e-01 1.0000000e-01 -9.5178342e-02 -1.0896703e-01 8.4912243e-02 1.1696686e-01 -7.3892456e-02 -1.2392848e-01 6.2216794e-02 1.2979010e-01 -4.9988892e-02 -1.3449970e-01 3.7317285e-02 1.3801547e-01 -2.4314447e-02 -1.4030621e-01 1.1095792e-02 1.4135157e-01 2.2213501e-03 -1.4114229e-01 -1.5518775e-02 1.3968022e-01 2.8678454e-02 -1.3697834e-01 -4.1583582e-02 1.3306063e-01 5.4119610e-02 -1.2796187e-01 -6.6175269e-02 1.2172730e-01 7.7643552e-02 -1.1441228e-01 -8.8422664e-02 1.0608172e-01 9.8416932e-02 -9.6809580e-02 -1.0753764e-01 8.6678147e-02 1.1570384e-01 -7.5777352e-02 -1.2284305e-01 6.4203952e-02 1.2889189e-01 -5.2060674e-02 -1.3379667e-01 3.9455301e-02 1.3751387e-01 -2.6499720e-02 -1.4001049e-01 1.3308925e-02 1.4126436e-01 1.0000000e-01 2.2213501e-03 -1.4135157e-01 -6.6618581e-03 1.4114229e-01 1.1095792e-02 -1.4079372e-01 -1.5518775e-02 1.4030621e-01 1.9926443e-02 -1.3968022e-01 -2.4314447e-02 1.3891640e-01 2.8678454e-02 -1.3801547e-01 -3.3014160e-02 1.3697834e-01 3.7317285e-02 -1.3580604e-01 -4.1583582e-02 1.3449970e-01 4.5808841e-02 -1.3306063e-01 -4.9988892e-02 1.3149025e-01 
5.4119610e-02 -1.2979010e-01 -5.8196919e-02 1.2796187e-01 6.2216794e-02 -1.2600735e-01 -6.6175269e-02 1.2392848e-01 7.0068437e-02 -1.2172730e-01 -7.3892456e-02 1.1940600e-01 7.7643552e-02 -1.1696686e-01 -8.1318023e-02 1.1441228e-01 8.4912243e-02 -1.1174479e-01 -8.8422664e-02 1.0896703e-01 9.1845824e-02 -1.0608172e-01 -9.5178342e-02 1.0309173e-01 9.8416932e-02 -1.0000000e-01 -1.0155839e-01 9.6809580e-02 1.0459963e-01 -9.3523621e-02 -1.0753764e-01 9.0145365e-02 1.1036953e-01 -8.6678147e-02 -1.1309249e-01 8.3125388e-02 1.1570384e-01 -7.9490594e-02 -1.1820101e-01 7.5777352e-02 1.2058153e-01 -7.1989327e-02 -1.2284305e-01 6.8130258e-02 1.2498333e-01 -6.4203952e-02 -1.2700028e-01 6.0214285e-02 1.2889189e-01 -5.6165193e-02 -1.3065630e-01 5.2060674e-02 1.3229176e-01 -4.7904776e-02 -1.3379667e-01 4.3701602e-02 1.3516954e-01 -3.9455301e-02 -1.3640902e-01 3.5170061e-02 1.3751387e-01 -3.0850113e-02 -1.3848302e-01 2.6499720e-02 1.3931550e-01 -2.2123174e-02 -1.4001049e-01 1.7724796e-02 1.4056731e-01 -1.3308925e-02 -1.4098540e-01 8.8799204e-03 1.4126436e-01 -4.4421521e-03 -1.4140391e-01 1.0000000e-01 -2.2213501e-03 -1.4135157e-01 6.6618581e-03 1.4114229e-01 -1.1095792e-02 -1.4079372e-01 1.5518775e-02 1.4030621e-01 -1.9926443e-02 -1.3968022e-01 2.4314447e-02 1.3891640e-01 -2.8678454e-02 -1.3801547e-01 3.3014160e-02 1.3697834e-01 -3.7317285e-02 -1.3580604e-01 4.1583582e-02 1.3449970e-01 -4.5808841e-02 -1.3306063e-01 4.9988892e-02 1.3149025e-01 -5.4119610e-02 -1.2979010e-01 5.8196919e-02 1.2796187e-01 -6.2216794e-02 -1.2600735e-01 6.6175269e-02 1.2392848e-01 -7.0068437e-02 -1.2172730e-01 7.3892456e-02 1.1940600e-01 -7.7643552e-02 -1.1696686e-01 8.1318023e-02 1.1441228e-01 -8.4912243e-02 -1.1174479e-01 8.8422664e-02 1.0896703e-01 -9.1845824e-02 -1.0608172e-01 9.5178342e-02 1.0309173e-01 -9.8416932e-02 -1.0000000e-01 1.0155839e-01 9.6809580e-02 -1.0459963e-01 -9.3523621e-02 1.0753764e-01 9.0145365e-02 -1.1036953e-01 -8.6678147e-02 1.1309249e-01 8.3125388e-02 -1.1570384e-01 -7.9490594e-02 1.1820101e-01 7.5777352e-02 -1.2058153e-01 -7.1989327e-02 1.2284305e-01 6.8130258e-02 -1.2498333e-01 -6.4203952e-02 1.2700028e-01 6.0214285e-02 -1.2889189e-01 -5.6165193e-02 1.3065630e-01 5.2060674e-02 -1.3229176e-01 -4.7904776e-02 1.3379667e-01 4.3701602e-02 -1.3516954e-01 -3.9455301e-02 1.3640902e-01 3.5170061e-02 -1.3751387e-01 -3.0850113e-02 1.3848302e-01 2.6499720e-02 -1.3931550e-01 -2.2123174e-02 1.4001049e-01 1.7724796e-02 -1.4056731e-01 -1.3308925e-02 1.4098540e-01 8.8799204e-03 -1.4126436e-01 -4.4421521e-03 1.4140391e-01 1.0000000e-01 -6.6618581e-03 -1.4079372e-01 1.9926443e-02 1.3891640e-01 -3.3014160e-02 -1.3580604e-01 4.5808841e-02 1.3149025e-01 -5.8196919e-02 -1.2600735e-01 7.0068437e-02 1.1940600e-01 -8.1318023e-02 -1.1174479e-01 9.1845824e-02 1.0309173e-01 -1.0155839e-01 -9.3523621e-02 1.1036953e-01 8.3125388e-02 -1.1820101e-01 -7.1989327e-02 1.2498333e-01 6.0214285e-02 -1.3065630e-01 -4.7904776e-02 1.3516954e-01 3.5170061e-02 -1.3848302e-01 -2.2123174e-02 1.4056731e-01 8.8799204e-03 -1.4140391e-01 4.4421521e-03 1.4098540e-01 -1.7724796e-02 -1.3931550e-01 3.0850113e-02 1.3640902e-01 -4.3701602e-02 -1.3229176e-01 5.6165193e-02 1.2700028e-01 -6.8130258e-02 -1.2058153e-01 7.9490594e-02 1.1309249e-01 -9.0145365e-02 -1.0459963e-01 1.0000000e-01 9.5178342e-02 -1.0896703e-01 -8.4912243e-02 1.1696686e-01 7.3892456e-02 -1.2392848e-01 -6.2216794e-02 1.2979010e-01 4.9988892e-02 -1.3449970e-01 -3.7317285e-02 1.3801547e-01 2.4314447e-02 -1.4030621e-01 -1.1095792e-02 1.4135157e-01 -2.2213501e-03 -1.4114229e-01 1.5518775e-02 
1.3968022e-01 -2.8678454e-02 -1.3697834e-01 4.1583582e-02 1.3306063e-01 -5.4119610e-02 -1.2796187e-01 6.6175269e-02 1.2172730e-01 -7.7643552e-02 -1.1441228e-01 8.8422664e-02 1.0608172e-01 -9.8416932e-02 -9.6809580e-02 1.0753764e-01 8.6678147e-02 -1.1570384e-01 -7.5777352e-02 1.2284305e-01 6.4203952e-02 -1.2889189e-01 -5.2060674e-02 1.3379667e-01 3.9455301e-02 -1.3751387e-01 -2.6499720e-02 1.4001049e-01 1.3308925e-02 -1.4126436e-01 1.0000000e-01 -1.1095792e-02 -1.3968022e-01 3.3014160e-02 1.3449970e-01 -5.4119610e-02 -1.2600735e-01 7.3892456e-02 1.1441228e-01 -9.1845824e-02 -1.0000000e-01 1.0753764e-01 8.3125388e-02 -1.2058153e-01 -6.4203952e-02 1.3065630e-01 4.3701602e-02 -1.3751387e-01 -2.2123174e-02 1.4098540e-01 -6.9364022e-17 -1.4098540e-01 2.2123174e-02 1.3751387e-01 -4.3701602e-02 -1.3065630e-01 6.4203952e-02 1.2058153e-01 -8.3125388e-02 -1.0753764e-01 1.0000000e-01 9.1845824e-02 -1.1441228e-01 -7.3892456e-02 1.2600735e-01 5.4119610e-02 -1.3449970e-01 -3.3014160e-02 1.3968022e-01 1.1095792e-02 -1.4142136e-01 1.1095792e-02 1.3968022e-01 -3.3014160e-02 -1.3449970e-01 5.4119610e-02 1.2600735e-01 -7.3892456e-02 -1.1441228e-01 9.1845824e-02 1.0000000e-01 -1.0753764e-01 -8.3125388e-02 1.2058153e-01 6.4203952e-02 -1.3065630e-01 -4.3701602e-02 1.3751387e-01 2.2123174e-02 -1.4098540e-01 2.0809206e-16 1.4098540e-01 -2.2123174e-02 -1.3751387e-01 4.3701602e-02 1.3065630e-01 -6.4203952e-02 -1.2058153e-01 8.3125388e-02 1.0753764e-01 -1.0000000e-01 -9.1845824e-02 1.1441228e-01 7.3892456e-02 -1.2600735e-01 -5.4119610e-02 1.3449970e-01 3.3014160e-02 -1.3968022e-01 -1.1095792e-02 1.4142136e-01 -1.1095792e-02 -1.3968022e-01 3.3014160e-02 1.3449970e-01 -5.4119610e-02 -1.2600735e-01 7.3892456e-02 1.1441228e-01 -9.1845824e-02 -1.0000000e-01 1.0753764e-01 8.3125388e-02 -1.2058153e-01 -6.4203952e-02 1.3065630e-01 4.3701602e-02 -1.3751387e-01 -2.2123174e-02 1.4098540e-01 1.0000000e-01 -1.5518775e-02 -1.3801547e-01 4.5808841e-02 1.2796187e-01 -7.3892456e-02 -1.1174479e-01 9.8416932e-02 9.0145365e-02 -1.1820101e-01 -6.4203952e-02 1.3229176e-01 3.5170061e-02 -1.4001049e-01 -4.4421521e-03 1.4098540e-01 -2.6499720e-02 -1.3516954e-01 5.6165193e-02 1.2284305e-01 -8.3125388e-02 -1.0459963e-01 1.0608172e-01 8.1318023e-02 -1.2392848e-01 -5.4119610e-02 1.3580604e-01 2.4314447e-02 -1.4114229e-01 6.6618581e-03 1.3968022e-01 -3.7317285e-02 -1.3149025e-01 6.6175269e-02 1.1696686e-01 -9.1845824e-02 -9.6809580e-02 1.1309249e-01 7.1989327e-02 -1.2889189e-01 -4.3701602e-02 1.3848302e-01 1.3308925e-02 -1.4140391e-01 1.7724796e-02 1.3751387e-01 -4.7904776e-02 -1.2700028e-01 7.5777352e-02 1.1036953e-01 -1.0000000e-01 -8.8422664e-02 1.1940600e-01 6.2216794e-02 -1.3306063e-01 -3.3014160e-02 1.4030621e-01 2.2213501e-03 -1.4079372e-01 2.8678454e-02 1.3449970e-01 -5.8196919e-02 -1.2172730e-01 8.4912243e-02 1.0309173e-01 -1.0753764e-01 -7.9490594e-02 1.2498333e-01 5.2060674e-02 -1.3640902e-01 -2.2123174e-02 1.4126436e-01 -8.8799204e-03 -1.3931550e-01 3.9455301e-02 1.3065630e-01 -6.8130258e-02 -1.1570384e-01 9.3523621e-02 9.5178342e-02 -1.1441228e-01 -7.0068437e-02 1.2979010e-01 4.1583582e-02 -1.3891640e-01 -1.1095792e-02 1.4135157e-01 -1.9926443e-02 -1.3697834e-01 4.9988892e-02 1.2600735e-01 -7.7643552e-02 -1.0896703e-01 1.0155839e-01 8.6678147e-02 -1.2058153e-01 -6.0214285e-02 1.3379667e-01 3.0850113e-02 -1.4056731e-01 1.0000000e-01 -1.9926443e-02 -1.3580604e-01 5.8196919e-02 1.1940600e-01 -9.1845824e-02 -9.3523621e-02 1.1820101e-01 6.0214285e-02 -1.3516954e-01 -2.2123174e-02 1.4140391e-01 -1.7724796e-02 -1.3640902e-01 5.6165193e-02 
1.2058153e-01 -9.0145365e-02 -9.5178342e-02 1.1696686e-01 6.2216794e-02 -1.3449970e-01 -2.4314447e-02 1.4135157e-01 -1.5518775e-02 -1.3697834e-01 5.4119610e-02 1.2172730e-01 -8.8422664e-02 -9.6809580e-02 1.1570384e-01 6.4203952e-02 -1.3379667e-01 -2.6499720e-02 1.4126436e-01 -1.3308925e-02 -1.3751387e-01 5.2060674e-02 1.2284305e-01 -8.6678147e-02 -9.8416932e-02 1.1441228e-01 6.6175269e-02 -1.3306063e-01 -2.8678454e-02 1.4114229e-01 -1.1095792e-02 -1.3801547e-01 4.9988892e-02 1.2392848e-01 -8.4912243e-02 -1.0000000e-01 1.1309249e-01 6.8130258e-02 -1.3229176e-01 -3.0850113e-02 1.4098540e-01 -8.8799204e-03 -1.3848302e-01 4.7904776e-02 1.2498333e-01 -8.3125388e-02 -1.0155839e-01 1.1174479e-01 7.0068437e-02 -1.3149025e-01 -3.3014160e-02 1.4079372e-01 -6.6618581e-03 -1.3891640e-01 4.5808841e-02 1.2600735e-01 -8.1318023e-02 -1.0309173e-01 1.1036953e-01 7.1989327e-02 -1.3065630e-01 -3.5170061e-02 1.4056731e-01 -4.4421521e-03 -1.3931550e-01 4.3701602e-02 1.2700028e-01 -7.9490594e-02 -1.0459963e-01 1.0896703e-01 7.3892456e-02 -1.2979010e-01 -3.7317285e-02 1.4030621e-01 -2.2213501e-03 -1.3968022e-01 4.1583582e-02 1.2796187e-01 -7.7643552e-02 -1.0608172e-01 1.0753764e-01 7.5777352e-02 -1.2889189e-01 -3.9455301e-02 1.4001049e-01 1.0000000e-01 -2.4314447e-02 -1.3306063e-01 7.0068437e-02 1.0896703e-01 -1.0753764e-01 -7.1989327e-02 1.3229176e-01 2.6499720e-02 -1.4140391e-01 2.2123174e-02 1.3379667e-01 -6.8130258e-02 -1.1036953e-01 1.0608172e-01 7.3892456e-02 -1.3149025e-01 -2.8678454e-02 1.4135157e-01 -1.9926443e-02 -1.3449970e-01 6.6175269e-02 1.1174479e-01 -1.0459963e-01 -7.5777352e-02 1.3065630e-01 3.0850113e-02 -1.4126436e-01 1.7724796e-02 1.3516954e-01 -6.4203952e-02 -1.1309249e-01 1.0309173e-01 7.7643552e-02 -1.2979010e-01 -3.3014160e-02 1.4114229e-01 -1.5518775e-02 -1.3580604e-01 6.2216794e-02 1.1441228e-01 -1.0155839e-01 -7.9490594e-02 1.2889189e-01 3.5170061e-02 -1.4098540e-01 1.3308925e-02 1.3640902e-01 -6.0214285e-02 -1.1570384e-01 1.0000000e-01 8.1318023e-02 -1.2796187e-01 -3.7317285e-02 1.4079372e-01 -1.1095792e-02 -1.3697834e-01 5.8196919e-02 1.1696686e-01 -9.8416932e-02 -8.3125388e-02 1.2700028e-01 3.9455301e-02 -1.4056731e-01 8.8799204e-03 1.3751387e-01 -5.6165193e-02 -1.1820101e-01 9.6809580e-02 8.4912243e-02 -1.2600735e-01 -4.1583582e-02 1.4030621e-01 -6.6618581e-03 -1.3801547e-01 5.4119610e-02 1.1940600e-01 -9.5178342e-02 -8.6678147e-02 1.2498333e-01 4.3701602e-02 -1.4001049e-01 4.4421521e-03 1.3848302e-01 -5.2060674e-02 -1.2058153e-01 9.3523621e-02 8.8422664e-02 -1.2392848e-01 -4.5808841e-02 1.3968022e-01 -2.2213501e-03 -1.3891640e-01 4.9988892e-02 1.2172730e-01 -9.1845824e-02 -9.0145365e-02 1.2284305e-01 4.7904776e-02 -1.3931550e-01 1.0000000e-01 -2.8678454e-02 -1.2979010e-01 8.1318023e-02 9.6809580e-02 -1.2058153e-01 -4.7904776e-02 1.4001049e-01 -8.8799204e-03 -1.3640902e-01 6.4203952e-02 1.1036953e-01 -1.0896703e-01 -6.6175269e-02 1.3580604e-01 1.1095792e-02 -1.4030621e-01 4.5808841e-02 1.2172730e-01 -9.5178342e-02 -8.3125388e-02 1.2889189e-01 3.0850113e-02 -1.4140391e-01 2.6499720e-02 1.3065630e-01 -7.9490594e-02 -9.8416932e-02 1.1940600e-01 4.9988892e-02 -1.3968022e-01 6.6618581e-03 1.3697834e-01 -6.2216794e-02 -1.1174479e-01 1.0753764e-01 6.8130258e-02 -1.3516954e-01 -1.3308925e-02 1.4056731e-01 -4.3701602e-02 -1.2284305e-01 9.3523621e-02 8.4912243e-02 -1.2796187e-01 -3.3014160e-02 1.4135157e-01 -2.4314447e-02 -1.3149025e-01 7.7643552e-02 1.0000000e-01 -1.1820101e-01 -5.2060674e-02 1.3931550e-01 -4.4421521e-03 -1.3751387e-01 6.0214285e-02 1.1309249e-01 -1.0608172e-01 
-7.0068437e-02 1.3449970e-01 1.5518775e-02 -1.4079372e-01 4.1583582e-02 1.2392848e-01 -9.1845824e-02 -8.6678147e-02 1.2700028e-01 3.5170061e-02 -1.4126436e-01 2.2123174e-02 1.3229176e-01 -7.5777352e-02 -1.0155839e-01 1.1696686e-01 5.4119610e-02 -1.3891640e-01 2.2213501e-03 1.3801547e-01 -5.8196919e-02 -1.1441228e-01 1.0459963e-01 7.1989327e-02 -1.3379667e-01 -1.7724796e-02 1.4098540e-01 -3.9455301e-02 -1.2498333e-01 9.0145365e-02 8.8422664e-02 -1.2600735e-01 -3.7317285e-02 1.4114229e-01 -1.9926443e-02 -1.3306063e-01 7.3892456e-02 1.0309173e-01 -1.1570384e-01 -5.6165193e-02 1.3848302e-01 1.0000000e-01 -3.3014160e-02 -1.2600735e-01 9.1845824e-02 8.3125388e-02 -1.3065630e-01 -2.2123174e-02 1.4098540e-01 -4.3701602e-02 -1.2058153e-01 1.0000000e-01 7.3892456e-02 -1.3449970e-01 -1.1095792e-02 1.3968022e-01 -5.4119610e-02 -1.1441228e-01 1.0753764e-01 6.4203952e-02 -1.3751387e-01 5.5447449e-16 1.3751387e-01 -6.4203952e-02 -1.0753764e-01 1.1441228e-01 5.4119610e-02 -1.3968022e-01 1.1095792e-02 1.3449970e-01 -7.3892456e-02 -1.0000000e-01 1.2058153e-01 4.3701602e-02 -1.4098540e-01 2.2123174e-02 1.3065630e-01 -8.3125388e-02 -9.1845824e-02 1.2600735e-01 3.3014160e-02 -1.4142136e-01 3.3014160e-02 1.2600735e-01 -9.1845824e-02 -8.3125388e-02 1.3065630e-01 2.2123174e-02 -1.4098540e-01 4.3701602e-02 1.2058153e-01 -1.0000000e-01 -7.3892456e-02 1.3449970e-01 1.1095792e-02 -1.3968022e-01 5.4119610e-02 1.1441228e-01 -1.0753764e-01 -6.4203952e-02 1.3751387e-01 -1.6634235e-15 -1.3751387e-01 6.4203952e-02 1.0753764e-01 -1.1441228e-01 -5.4119610e-02 1.3968022e-01 -1.1095792e-02 -1.3449970e-01 7.3892456e-02 1.0000000e-01 -1.2058153e-01 -4.3701602e-02 1.4098540e-01 -2.2123174e-02 -1.3065630e-01 8.3125388e-02 9.1845824e-02 -1.2600735e-01 -3.3014160e-02 1.4142136e-01 -3.3014160e-02 -1.2600735e-01 9.1845824e-02 8.3125388e-02 -1.3065630e-01 -2.2123174e-02 1.4098540e-01 -4.3701602e-02 -1.2058153e-01 1.0000000e-01 7.3892456e-02 -1.3449970e-01 -1.1095792e-02 1.3968022e-01 -5.4119610e-02 -1.1441228e-01 1.0753764e-01 6.4203952e-02 -1.3751387e-01 1.0000000e-01 -3.7317285e-02 -1.2172730e-01 1.0155839e-01 6.8130258e-02 -1.3751387e-01 4.4421521e-03 1.3516954e-01 -7.5777352e-02 -9.5178342e-02 1.2600735e-01 2.8678454e-02 -1.4114229e-01 4.5808841e-02 1.1696686e-01 -1.0753764e-01 -6.0214285e-02 1.3931550e-01 -1.3308925e-02 -1.3229176e-01 8.3125388e-02 8.8422664e-02 -1.2979010e-01 -1.9926443e-02 1.4030621e-01 -5.4119610e-02 -1.1174479e-01 1.1309249e-01 5.2060674e-02 -1.4056731e-01 2.2123174e-02 1.2889189e-01 -9.0145365e-02 -8.1318023e-02 1.3306063e-01 1.1095792e-02 -1.3891640e-01 6.2216794e-02 1.0608172e-01 -1.1820101e-01 -4.3701602e-02 1.4126436e-01 -3.0850113e-02 -1.2498333e-01 9.6809580e-02 7.3892456e-02 -1.3580604e-01 -2.2213501e-03 1.3697834e-01 -7.0068437e-02 -1.0000000e-01 1.2284305e-01 3.5170061e-02 -1.4140391e-01 3.9455301e-02 1.2058153e-01 -1.0309173e-01 -6.6175269e-02 1.3801547e-01 -6.6618581e-03 -1.3449970e-01 7.7643552e-02 9.3523621e-02 -1.2700028e-01 -2.6499720e-02 1.4098540e-01 -4.7904776e-02 -1.1570384e-01 1.0896703e-01 5.8196919e-02 -1.3968022e-01 1.5518775e-02 1.3149025e-01 -8.4912243e-02 -8.6678147e-02 1.3065630e-01 1.7724796e-02 -1.4001049e-01 5.6165193e-02 1.1036953e-01 -1.1441228e-01 -4.9988892e-02 1.4079372e-01 -2.4314447e-02 -1.2796187e-01 9.1845824e-02 7.9490594e-02 -1.3379667e-01 -8.8799204e-03 1.3848302e-01 -6.4203952e-02 -1.0459963e-01 1.1940600e-01 4.1583582e-02 -1.4135157e-01 3.3014160e-02 1.2392848e-01 -9.8416932e-02 -7.1989327e-02 1.3640902e-01 1.0000000e-01 -4.1583582e-02 -1.1696686e-01 1.1036953e-01 
5.2060674e-02 -1.4098540e-01 3.0850113e-02 1.2284305e-01 -1.0309173e-01 -6.2216794e-02 1.3968022e-01 -1.9926443e-02 -1.2796187e-01 9.5178342e-02 7.1989327e-02 -1.3751387e-01 8.8799204e-03 1.3229176e-01 -8.6678147e-02 -8.1318023e-02 1.3449970e-01 2.2213501e-03 -1.3580604e-01 7.7643552e-02 9.0145365e-02 -1.3065630e-01 -1.3308925e-02 1.3848302e-01 -6.8130258e-02 -9.8416932e-02 1.2600735e-01 2.4314447e-02 -1.4030621e-01 5.8196919e-02 1.0608172e-01 -1.2058153e-01 -3.5170061e-02 1.4126436e-01 -4.7904776e-02 -1.1309249e-01 1.1441228e-01 4.5808841e-02 -1.4135157e-01 3.7317285e-02 1.1940600e-01 -1.0753764e-01 -5.6165193e-02 1.4056731e-01 -2.6499720e-02 -1.2498333e-01 1.0000000e-01 6.6175269e-02 -1.3891640e-01 1.5518775e-02 1.2979010e-01 -9.1845824e-02 -7.5777352e-02 1.3640902e-01 -4.4421521e-03 -1.3379667e-01 8.3125388e-02 8.4912243e-02 -1.3306063e-01 -6.6618581e-03 1.3697834e-01 -7.3892456e-02 -9.3523621e-02 1.2889189e-01 1.7724796e-02 -1.3931550e-01 6.4203952e-02 1.0155839e-01 -1.2392848e-01 -2.8678454e-02 1.4079372e-01 -5.4119610e-02 -1.0896703e-01 1.1820101e-01 3.9455301e-02 -1.4140391e-01 4.3701602e-02 1.1570384e-01 -1.1174479e-01 -4.9988892e-02 1.4114229e-01 -3.3014160e-02 -1.2172730e-01 1.0459963e-01 6.0214285e-02 -1.4001049e-01 2.2123174e-02 1.2700028e-01 -9.6809580e-02 -7.0068437e-02 1.3801547e-01 -1.1095792e-02 -1.3149025e-01 8.8422664e-02 7.9490594e-02 -1.3516954e-01 1.0000000e-01 -4.5808841e-02 -1.1174479e-01 1.1820101e-01 3.5170061e-02 -1.4098540e-01 5.6165193e-02 1.0459963e-01 -1.2392848e-01 -2.4314447e-02 1.3968022e-01 -6.6175269e-02 -9.6809580e-02 1.2889189e-01 1.3308925e-02 -1.3751387e-01 7.5777352e-02 8.8422664e-02 -1.3306063e-01 -2.2213501e-03 1.3449970e-01 -8.4912243e-02 -7.9490594e-02 1.3640902e-01 -8.8799204e-03 -1.3065630e-01 9.3523621e-02 7.0068437e-02 -1.3891640e-01 1.9926443e-02 1.2600735e-01 -1.0155839e-01 -6.0214285e-02 1.4056731e-01 -3.0850113e-02 -1.2058153e-01 1.0896703e-01 4.9988892e-02 -1.4135157e-01 4.1583582e-02 1.1441228e-01 -1.1570384e-01 -3.9455301e-02 1.4126436e-01 -5.2060674e-02 -1.0753764e-01 1.2172730e-01 2.8678454e-02 -1.4030621e-01 6.2216794e-02 1.0000000e-01 -1.2700028e-01 -1.7724796e-02 1.3848302e-01 -7.1989327e-02 -9.1845824e-02 1.3149025e-01 6.6618581e-03 -1.3580604e-01 8.1318023e-02 8.3125388e-02 -1.3516954e-01 4.4421521e-03 1.3229176e-01 -9.0145365e-02 -7.3892456e-02 1.3801547e-01 -1.5518775e-02 -1.2796187e-01 9.8416932e-02 6.4203952e-02 -1.4001049e-01 2.6499720e-02 1.2284305e-01 -1.0608172e-01 -5.4119610e-02 1.4114229e-01 -3.7317285e-02 -1.1696686e-01 1.1309249e-01 4.3701602e-02 -1.4140391e-01 4.7904776e-02 1.1036953e-01 -1.1940600e-01 -3.3014160e-02 1.4079372e-01 -5.8196919e-02 -1.0309173e-01 1.2498333e-01 2.2123174e-02 -1.3931550e-01 6.8130258e-02 9.5178342e-02 -1.2979010e-01 -1.1095792e-02 1.3697834e-01 -7.7643552e-02 -8.6678147e-02 1.3379667e-01 1.0000000e-01 -4.9988892e-02 -1.0608172e-01 1.2498333e-01 1.7724796e-02 -1.3751387e-01 7.9490594e-02 8.1318023e-02 -1.3697834e-01 1.5518775e-02 1.2600735e-01 -1.0459963e-01 -5.2060674e-02 1.4140391e-01 -4.7904776e-02 -1.0753764e-01 1.2392848e-01 1.9926443e-02 -1.3801547e-01 7.7643552e-02 8.3125388e-02 -1.3640902e-01 1.3308925e-02 1.2700028e-01 -1.0309173e-01 -5.4119610e-02 1.4135157e-01 -4.5808841e-02 -1.0896703e-01 1.2284305e-01 2.2123174e-02 -1.3848302e-01 7.5777352e-02 8.4912243e-02 -1.3580604e-01 1.1095792e-02 1.2796187e-01 -1.0155839e-01 -5.6165193e-02 1.4126436e-01 -4.3701602e-02 -1.1036953e-01 1.2172730e-01 2.4314447e-02 -1.3891640e-01 7.3892456e-02 8.6678147e-02 -1.3516954e-01 8.8799204e-03 
1.2889189e-01 -1.0000000e-01 -5.8196919e-02 1.4114229e-01 -4.1583582e-02 -1.1174479e-01 1.2058153e-01 2.6499720e-02 -1.3931550e-01 7.1989327e-02 8.8422664e-02 -1.3449970e-01 6.6618581e-03 1.2979010e-01 -9.8416932e-02 -6.0214285e-02 1.4098540e-01 -3.9455301e-02 -1.1309249e-01 1.1940600e-01 2.8678454e-02 -1.3968022e-01 7.0068437e-02 9.0145365e-02 -1.3379667e-01 4.4421521e-03 1.3065630e-01 -9.6809580e-02 -6.2216794e-02 1.4079372e-01 -3.7317285e-02 -1.1441228e-01 1.1820101e-01 3.0850113e-02 -1.4001049e-01 6.8130258e-02 9.1845824e-02 -1.3306063e-01 2.2213501e-03 1.3149025e-01 -9.5178342e-02 -6.4203952e-02 1.4056731e-01 -3.5170061e-02 -1.1570384e-01 1.1696686e-01 3.3014160e-02 -1.4030621e-01 6.6175269e-02 9.3523621e-02 -1.3229176e-01 1.0000000e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 1.6890520e-16 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -3.8110820e-16 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 9.7013339e-16 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -5.5429941e-16 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 2.1481838e-15 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -1.7323498e-15 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 1.3165158e-15 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -2.9104002e-15 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 4.8484785e-16 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -6.9013873e-17 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 3.6726166e-15 -1.3065630e-01 1.0000000e-01 5.4119610e-02 -1.4142136e-01 5.4119610e-02 1.0000000e-01 -1.3065630e-01 -3.2567826e-15 1.3065630e-01 -1.0000000e-01 -5.4119610e-02 1.4142136e-01 -5.4119610e-02 -1.0000000e-01 1.3065630e-01 1.0000000e-01 -5.8196919e-02 -9.3523621e-02 1.3516954e-01 -1.7724796e-02 -1.2058153e-01 1.1696686e-01 2.4314447e-02 -1.3697834e-01 8.8422664e-02 6.4203952e-02 -1.4126436e-01 5.2060674e-02 9.8416932e-02 -1.3306063e-01 1.1095792e-02 1.2392848e-01 -1.1309249e-01 -3.0850113e-02 1.3848302e-01 -8.3125388e-02 -7.0068437e-02 1.4079372e-01 -4.5808841e-02 -1.0309173e-01 1.3065630e-01 -4.4421521e-03 -1.2700028e-01 1.0896703e-01 3.7317285e-02 -1.3968022e-01 7.7643552e-02 7.5777352e-02 -1.4001049e-01 3.9455301e-02 1.0753764e-01 -1.2796187e-01 -2.2213501e-03 1.2979010e-01 -1.0459963e-01 -4.3701602e-02 1.4056731e-01 -7.1989327e-02 -8.1318023e-02 1.3891640e-01 -3.3014160e-02 -1.1174479e-01 1.2498333e-01 8.8799204e-03 -1.3229176e-01 1.0000000e-01 4.9988892e-02 -1.4114229e-01 6.6175269e-02 8.6678147e-02 -1.3751387e-01 2.6499720e-02 1.1570384e-01 -1.2172730e-01 -1.5518775e-02 1.3449970e-01 -9.5178342e-02 -5.6165193e-02 1.4140391e-01 -6.0214285e-02 -9.1845824e-02 1.3580604e-01 -1.9926443e-02 -1.1940600e-01 1.1820101e-01 2.2123174e-02 -1.3640902e-01 9.0145365e-02 6.2216794e-02 -1.4135157e-01 5.4119610e-02 9.6809580e-02 -1.3379667e-01 1.3308925e-02 1.2284305e-01 -1.1441228e-01 -2.8678454e-02 1.3801547e-01 -8.4912243e-02 -6.8130258e-02 1.4098540e-01 -4.7904776e-02 -1.0155839e-01 1.3149025e-01 -6.6618581e-03 -1.2600735e-01 1.1036953e-01 3.5170061e-02 -1.3931550e-01 
7.9490594e-02 7.3892456e-02 -1.4030621e-01 4.1583582e-02 1.0608172e-01 -1.2889189e-01 1.0000000e-01 -6.2216794e-02 -8.6678147e-02 1.3848302e-01 -3.5170061e-02 -1.0753764e-01 1.2979010e-01 -6.6618581e-03 -1.2392848e-01 1.1570384e-01 2.2123174e-02 -1.3516954e-01 9.6809580e-02 4.9988892e-02 -1.4079372e-01 7.3892456e-02 7.5777352e-02 -1.4056731e-01 4.7904776e-02 9.8416932e-02 -1.3449970e-01 1.9926443e-02 1.1696686e-01 -1.2284305e-01 -8.8799204e-03 1.3065630e-01 -1.0608172e-01 -3.7317285e-02 1.3891640e-01 -8.4912243e-02 -6.4203952e-02 1.4140391e-01 -6.0214285e-02 -8.8422664e-02 1.3801547e-01 -3.3014160e-02 -1.0896703e-01 1.2889189e-01 -4.4421521e-03 -1.2498333e-01 1.1441228e-01 2.4314447e-02 -1.3580604e-01 9.5178342e-02 5.2060674e-02 -1.4098540e-01 7.1989327e-02 7.7643552e-02 -1.4030621e-01 4.5808841e-02 1.0000000e-01 -1.3379667e-01 1.7724796e-02 1.1820101e-01 -1.2172730e-01 -1.1095792e-02 1.3149025e-01 -1.0459963e-01 -3.9455301e-02 1.3931550e-01 -8.3125388e-02 -6.6175269e-02 1.4135157e-01 -5.8196919e-02 -9.0145365e-02 1.3751387e-01 -3.0850113e-02 -1.1036953e-01 1.2796187e-01 -2.2213501e-03 -1.2600735e-01 1.1309249e-01 2.6499720e-02 -1.3640902e-01 9.3523621e-02 5.4119610e-02 -1.4114229e-01 7.0068437e-02 7.9490594e-02 -1.4001049e-01 4.3701602e-02 1.0155839e-01 -1.3306063e-01 1.5518775e-02 1.1940600e-01 -1.2058153e-01 -1.3308925e-02 1.3229176e-01 -1.0309173e-01 -4.1583582e-02 1.3968022e-01 -8.1318023e-02 -6.8130258e-02 1.4126436e-01 -5.6165193e-02 -9.1845824e-02 1.3697834e-01 -2.8678454e-02 -1.1174479e-01 1.2700028e-01 1.0000000e-01 -6.6175269e-02 -7.9490594e-02 1.4056731e-01 -5.2060674e-02 -9.1845824e-02 1.3801547e-01 -3.7317285e-02 -1.0309173e-01 1.3379667e-01 -2.2123174e-02 -1.1309249e-01 1.2796187e-01 -6.6618581e-03 -1.2172730e-01 1.2058153e-01 8.8799204e-03 -1.2889189e-01 1.1174479e-01 2.4314447e-02 -1.3449970e-01 1.0155839e-01 3.9455301e-02 -1.3848302e-01 9.0145365e-02 5.4119610e-02 -1.4079372e-01 7.7643552e-02 6.8130258e-02 -1.4140391e-01 6.4203952e-02 8.1318023e-02 -1.4030621e-01 4.9988892e-02 9.3523621e-02 -1.3751387e-01 3.5170061e-02 1.0459963e-01 -1.3306063e-01 1.9926443e-02 1.1441228e-01 -1.2700028e-01 4.4421521e-03 1.2284305e-01 -1.1940600e-01 -1.1095792e-02 1.2979010e-01 -1.1036953e-01 -2.6499720e-02 1.3516954e-01 -1.0000000e-01 -4.1583582e-02 1.3891640e-01 -8.8422664e-02 -5.6165193e-02 1.4098540e-01 -7.5777352e-02 -7.0068437e-02 1.4135157e-01 -6.2216794e-02 -8.3125388e-02 1.4001049e-01 -4.7904776e-02 -9.5178342e-02 1.3697834e-01 -3.3014160e-02 -1.0608172e-01 1.3229176e-01 -1.7724796e-02 -1.1570384e-01 1.2600735e-01 -2.2213501e-03 -1.2392848e-01 1.1820101e-01 1.3308925e-02 -1.3065630e-01 1.0896703e-01 2.8678454e-02 -1.3580604e-01 9.8416932e-02 4.3701602e-02 -1.3931550e-01 8.6678147e-02 5.8196919e-02 -1.4114229e-01 7.3892456e-02 7.1989327e-02 -1.4126436e-01 6.0214285e-02 8.4912243e-02 -1.3968022e-01 4.5808841e-02 9.6809580e-02 -1.3640902e-01 3.0850113e-02 1.0753764e-01 -1.3149025e-01 1.5518775e-02 1.1696686e-01 -1.2498333e-01 1.0000000e-01 -7.0068437e-02 -7.1989327e-02 1.4140391e-01 -6.8130258e-02 -7.3892456e-02 1.4135157e-01 -6.6175269e-02 -7.5777352e-02 1.4126436e-01 -6.4203952e-02 -7.7643552e-02 1.4114229e-01 -6.2216794e-02 -7.9490594e-02 1.4098540e-01 -6.0214285e-02 -8.1318023e-02 1.4079372e-01 -5.8196919e-02 -8.3125388e-02 1.4056731e-01 -5.6165193e-02 -8.4912243e-02 1.4030621e-01 -5.4119610e-02 -8.6678147e-02 1.4001049e-01 -5.2060674e-02 -8.8422664e-02 1.3968022e-01 -4.9988892e-02 -9.0145365e-02 1.3931550e-01 -4.7904776e-02 -9.1845824e-02 1.3891640e-01 -4.5808841e-02 
-9.3523621e-02 1.3848302e-01 -4.3701602e-02 -9.5178342e-02 1.3801547e-01 -4.1583582e-02 -9.6809580e-02 1.3751387e-01 -3.9455301e-02 -9.8416932e-02 1.3697834e-01 -3.7317285e-02 -1.0000000e-01 1.3640902e-01 -3.5170061e-02 -1.0155839e-01 1.3580604e-01 -3.3014160e-02 -1.0309173e-01 1.3516954e-01 -3.0850113e-02 -1.0459963e-01 1.3449970e-01 -2.8678454e-02 -1.0608172e-01 1.3379667e-01 -2.6499720e-02 -1.0753764e-01 1.3306063e-01 -2.4314447e-02 -1.0896703e-01 1.3229176e-01 -2.2123174e-02 -1.1036953e-01 1.3149025e-01 -1.9926443e-02 -1.1174479e-01 1.3065630e-01 -1.7724796e-02 -1.1309249e-01 1.2979010e-01 -1.5518775e-02 -1.1441228e-01 1.2889189e-01 -1.3308925e-02 -1.1570384e-01 1.2796187e-01 -1.1095792e-02 -1.1696686e-01 1.2700028e-01 -8.8799204e-03 -1.1820101e-01 1.2600735e-01 -6.6618581e-03 -1.1940600e-01 1.2498333e-01 -4.4421521e-03 -1.2058153e-01 1.2392848e-01 -2.2213501e-03 -1.2172730e-01 1.2284305e-01 1.0000000e-01 -7.3892456e-02 -6.4203952e-02 1.4098540e-01 -8.3125388e-02 -5.4119610e-02 1.3968022e-01 -9.1845824e-02 -4.3701602e-02 1.3751387e-01 -1.0000000e-01 -3.3014160e-02 1.3449970e-01 -1.0753764e-01 -2.2123174e-02 1.3065630e-01 -1.1441228e-01 -1.1095792e-02 1.2600735e-01 -1.2058153e-01 -4.8502293e-16 1.2058153e-01 -1.2600735e-01 1.1095792e-02 1.1441228e-01 -1.3065630e-01 2.2123174e-02 1.0753764e-01 -1.3449970e-01 3.3014160e-02 1.0000000e-01 -1.3751387e-01 4.3701602e-02 9.1845824e-02 -1.3968022e-01 5.4119610e-02 8.3125388e-02 -1.4098540e-01 6.4203952e-02 7.3892456e-02 -1.4142136e-01 7.3892456e-02 6.4203952e-02 -1.4098540e-01 8.3125388e-02 5.4119610e-02 -1.3968022e-01 9.1845824e-02 4.3701602e-02 -1.3751387e-01 1.0000000e-01 3.3014160e-02 -1.3449970e-01 1.0753764e-01 2.2123174e-02 -1.3065630e-01 1.1441228e-01 1.1095792e-02 -1.2600735e-01 1.2058153e-01 1.4550688e-15 -1.2058153e-01 1.2600735e-01 -1.1095792e-02 -1.1441228e-01 1.3065630e-01 -2.2123174e-02 -1.0753764e-01 1.3449970e-01 -3.3014160e-02 -1.0000000e-01 1.3751387e-01 -4.3701602e-02 -9.1845824e-02 1.3968022e-01 -5.4119610e-02 -8.3125388e-02 1.4098540e-01 -6.4203952e-02 -7.3892456e-02 1.4142136e-01 -7.3892456e-02 -6.4203952e-02 1.4098540e-01 -8.3125388e-02 -5.4119610e-02 1.3968022e-01 -9.1845824e-02 -4.3701602e-02 1.3751387e-01 -1.0000000e-01 -3.3014160e-02 1.3449970e-01 -1.0753764e-01 -2.2123174e-02 1.3065630e-01 -1.1441228e-01 -1.1095792e-02 1.2600735e-01 -1.2058153e-01 1.0000000e-01 -7.7643552e-02 -5.6165193e-02 1.3931550e-01 -9.6809580e-02 -3.3014160e-02 1.3306063e-01 -1.1309249e-01 -8.8799204e-03 1.2284305e-01 -1.2600735e-01 1.5518775e-02 1.0896703e-01 -1.3516954e-01 3.9455301e-02 9.1845824e-02 -1.4030621e-01 6.2216794e-02 7.1989327e-02 -1.4126436e-01 8.3125388e-02 4.9988892e-02 -1.3801547e-01 1.0155839e-01 2.6499720e-02 -1.3065630e-01 1.1696686e-01 2.2213501e-03 -1.1940600e-01 1.2889189e-01 -2.2123174e-02 -1.0459963e-01 1.3697834e-01 -4.5808841e-02 -8.6678147e-02 1.4098540e-01 -6.8130258e-02 -6.6175269e-02 1.4079372e-01 -8.8422664e-02 -4.3701602e-02 1.3640902e-01 -1.0608172e-01 -1.9926443e-02 1.2796187e-01 -1.2058153e-01 4.4421521e-03 1.1570384e-01 -1.3149025e-01 2.8678454e-02 1.0000000e-01 -1.3848302e-01 5.2060674e-02 8.1318023e-02 -1.4135157e-01 7.3892456e-02 6.0214285e-02 -1.4001049e-01 9.3523621e-02 3.7317285e-02 -1.3449970e-01 1.1036953e-01 1.3308925e-02 -1.2498333e-01 1.2392848e-01 -1.1095792e-02 -1.1174479e-01 1.3379667e-01 -3.5170061e-02 -9.5178342e-02 1.3968022e-01 -5.8196919e-02 -7.5777352e-02 1.4140391e-01 -7.9490594e-02 -5.4119610e-02 1.3891640e-01 -9.8416932e-02 -3.0850113e-02 1.3229176e-01 -1.1441228e-01 -6.6618581e-03 
1.2172730e-01 -1.2700028e-01 1.7724796e-02 1.0753764e-01 -1.3580604e-01 4.1583582e-02 9.0145365e-02 -1.4056731e-01 6.4203952e-02 7.0068437e-02 -1.4114229e-01 8.4912243e-02 4.7904776e-02 -1.3751387e-01 1.0309173e-01 2.4314447e-02 -1.2979010e-01 1.1820101e-01 1.0000000e-01 -8.1318023e-02 -4.7904776e-02 1.3640902e-01 -1.0896703e-01 -1.1095792e-02 1.2172730e-01 -1.2889189e-01 2.6499720e-02 9.8416932e-02 -1.3968022e-01 6.2216794e-02 6.8130258e-02 -1.4056731e-01 9.3523621e-02 3.3014160e-02 -1.3149025e-01 1.1820101e-01 -4.4421521e-03 -1.1309249e-01 1.3449970e-01 -4.1583582e-02 -8.6678147e-02 1.4126436e-01 -7.5777352e-02 -5.4119610e-02 1.3801547e-01 -1.0459963e-01 -1.7724796e-02 1.2498333e-01 -1.2600735e-01 1.9926443e-02 1.0309173e-01 -1.3848302e-01 5.6165193e-02 7.3892456e-02 -1.4114229e-01 8.8422664e-02 3.9455301e-02 -1.3379667e-01 1.1441228e-01 2.2213501e-03 -1.1696686e-01 1.3229176e-01 -3.5170061e-02 -9.1845824e-02 1.4079372e-01 -7.0068437e-02 -6.0214285e-02 1.3931550e-01 -1.0000000e-01 -2.4314447e-02 1.2796187e-01 -1.2284305e-01 1.3308925e-02 1.0753764e-01 -1.3697834e-01 4.9988892e-02 7.9490594e-02 -1.4140391e-01 8.3125388e-02 4.5808841e-02 -1.3580604e-01 1.1036953e-01 8.8799204e-03 -1.2058153e-01 1.2979010e-01 -2.8678454e-02 -9.6809580e-02 1.4001049e-01 -6.4203952e-02 -6.6175269e-02 1.4030621e-01 -9.5178342e-02 -3.0850113e-02 1.3065630e-01 -1.1940600e-01 6.6618581e-03 1.1174479e-01 -1.3516954e-01 4.3701602e-02 8.4912243e-02 -1.4135157e-01 7.7643552e-02 5.2060674e-02 -1.3751387e-01 1.0608172e-01 1.5518775e-02 -1.2392848e-01 1.2700028e-01 -2.2123174e-02 -1.0155839e-01 1.3891640e-01 -5.8196919e-02 -7.1989327e-02 1.4098540e-01 -9.0145365e-02 -3.7317285e-02 1.3306063e-01 -1.1570384e-01 1.0000000e-01 -8.4912243e-02 -3.9455301e-02 1.3229176e-01 -1.1940600e-01 1.1095792e-02 1.0608172e-01 -1.3848302e-01 6.0214285e-02 6.6175269e-02 -1.3968022e-01 1.0155839e-01 1.7724796e-02 -1.2284305e-01 1.2979010e-01 -3.3014160e-02 -9.0145365e-02 1.4126436e-01 -7.9490594e-02 -4.5808841e-02 1.3449970e-01 -1.1570384e-01 4.4421521e-03 1.1036953e-01 -1.3697834e-01 5.4119610e-02 7.1989327e-02 -1.4056731e-01 9.6809580e-02 2.4314447e-02 -1.2600735e-01 1.2700028e-01 -2.6499720e-02 -9.5178342e-02 1.4079372e-01 -7.3892456e-02 -5.2060674e-02 1.3640902e-01 -1.1174479e-01 -2.2213501e-03 1.1441228e-01 -1.3516954e-01 4.7904776e-02 7.7643552e-02 -1.4114229e-01 9.1845824e-02 3.0850113e-02 -1.2889189e-01 1.2392848e-01 -1.9926443e-02 -1.0000000e-01 1.4001049e-01 -6.8130258e-02 -5.8196919e-02 1.3801547e-01 -1.0753764e-01 -8.8799204e-03 1.1820101e-01 -1.3306063e-01 4.1583582e-02 8.3125388e-02 -1.4140391e-01 8.6678147e-02 3.7317285e-02 -1.3149025e-01 1.2058153e-01 -1.3308925e-02 -1.0459963e-01 1.3891640e-01 -6.2216794e-02 -6.4203952e-02 1.3931550e-01 -1.0309173e-01 -1.5518775e-02 1.2172730e-01 -1.3065630e-01 3.5170061e-02 8.8422664e-02 -1.4135157e-01 8.1318023e-02 4.3701602e-02 -1.3379667e-01 1.1696686e-01 -6.6618581e-03 -1.0896703e-01 1.3751387e-01 -5.6165193e-02 -7.0068437e-02 1.4030621e-01 -9.8416932e-02 -2.2123174e-02 1.2498333e-01 -1.2796187e-01 2.8678454e-02 9.3523621e-02 -1.4098540e-01 7.5777352e-02 4.9988892e-02 -1.3580604e-01 1.1309249e-01 1.0000000e-01 -8.8422664e-02 -3.0850113e-02 1.2700028e-01 -1.2796187e-01 3.3014160e-02 8.6678147e-02 -1.4140391e-01 9.0145365e-02 2.8678454e-02 -1.2600735e-01 1.2889189e-01 -3.5170061e-02 -8.4912243e-02 1.4135157e-01 -9.1845824e-02 -2.6499720e-02 1.2498333e-01 -1.2979010e-01 3.7317285e-02 8.3125388e-02 -1.4126436e-01 9.3523621e-02 2.4314447e-02 -1.2392848e-01 1.3065630e-01 -3.9455301e-02 
-8.1318023e-02 1.4114229e-01 -9.5178342e-02 -2.2123174e-02 1.2284305e-01 -1.3149025e-01 4.1583582e-02 7.9490594e-02 -1.4098540e-01 9.6809580e-02 1.9926443e-02 -1.2172730e-01 1.3229176e-01 -4.3701602e-02 -7.7643552e-02 1.4079372e-01 -9.8416932e-02 -1.7724796e-02 1.2058153e-01 -1.3306063e-01 4.5808841e-02 7.5777352e-02 -1.4056731e-01 1.0000000e-01 1.5518775e-02 -1.1940600e-01 1.3379667e-01 -4.7904776e-02 -7.3892456e-02 1.4030621e-01 -1.0155839e-01 -1.3308925e-02 1.1820101e-01 -1.3449970e-01 4.9988892e-02 7.1989327e-02 -1.4001049e-01 1.0309173e-01 1.1095792e-02 -1.1696686e-01 1.3516954e-01 -5.2060674e-02 -7.0068437e-02 1.3968022e-01 -1.0459963e-01 -8.8799204e-03 1.1570384e-01 -1.3580604e-01 5.4119610e-02 6.8130258e-02 -1.3931550e-01 1.0608172e-01 6.6618581e-03 -1.1441228e-01 1.3640902e-01 -5.6165193e-02 -6.6175269e-02 1.3891640e-01 -1.0753764e-01 -4.4421521e-03 1.1309249e-01 -1.3697834e-01 5.8196919e-02 6.4203952e-02 -1.3848302e-01 1.0896703e-01 2.2213501e-03 -1.1174479e-01 1.3751387e-01 -6.0214285e-02 -6.2216794e-02 1.3801547e-01 -1.1036953e-01 1.0000000e-01 -9.1845824e-02 -2.2123174e-02 1.2058153e-01 -1.3449970e-01 5.4119610e-02 6.4203952e-02 -1.3751387e-01 1.1441228e-01 -1.1095792e-02 -1.0000000e-01 1.4098540e-01 -8.3125388e-02 -3.3014160e-02 1.2600735e-01 -1.3065630e-01 4.3701602e-02 7.3892456e-02 -1.3968022e-01 1.0753764e-01 1.0047716e-15 -1.0753764e-01 1.3968022e-01 -7.3892456e-02 -4.3701602e-02 1.3065630e-01 -1.2600735e-01 3.3014160e-02 8.3125388e-02 -1.4098540e-01 1.0000000e-01 1.1095792e-02 -1.1441228e-01 1.3751387e-01 -6.4203952e-02 -5.4119610e-02 1.3449970e-01 -1.2058153e-01 2.2123174e-02 9.1845824e-02 -1.4142136e-01 9.1845824e-02 2.2123174e-02 -1.2058153e-01 1.3449970e-01 -5.4119610e-02 -6.4203952e-02 1.3751387e-01 -1.1441228e-01 1.1095792e-02 1.0000000e-01 -1.4098540e-01 8.3125388e-02 3.3014160e-02 -1.2600735e-01 1.3065630e-01 -4.3701602e-02 -7.3892456e-02 1.3968022e-01 -1.0753764e-01 -2.0094557e-15 1.0753764e-01 -1.3968022e-01 7.3892456e-02 4.3701602e-02 -1.3065630e-01 1.2600735e-01 -3.3014160e-02 -8.3125388e-02 1.4098540e-01 -1.0000000e-01 -1.1095792e-02 1.1441228e-01 -1.3751387e-01 6.4203952e-02 5.4119610e-02 -1.3449970e-01 1.2058153e-01 -2.2123174e-02 -9.1845824e-02 1.4142136e-01 -9.1845824e-02 -2.2123174e-02 1.2058153e-01 -1.3449970e-01 5.4119610e-02 6.4203952e-02 -1.3751387e-01 1.1441228e-01 -1.1095792e-02 -1.0000000e-01 1.4098540e-01 -8.3125388e-02 -3.3014160e-02 1.2600735e-01 -1.3065630e-01 4.3701602e-02 7.3892456e-02 -1.3968022e-01 1.0753764e-01 1.0000000e-01 -9.5178342e-02 -1.3308925e-02 1.1309249e-01 -1.3891640e-01 7.3892456e-02 3.9455301e-02 -1.2700028e-01 1.3149025e-01 -4.9988892e-02 -6.4203952e-02 1.3640902e-01 -1.1940600e-01 2.4314447e-02 8.6678147e-02 -1.4098540e-01 1.0309173e-01 2.2213501e-03 -1.0608172e-01 1.4056731e-01 -8.3125388e-02 -2.8678454e-02 1.2172730e-01 -1.3516954e-01 6.0214285e-02 5.4119610e-02 -1.3306063e-01 1.2498333e-01 -3.5170061e-02 -7.7643552e-02 1.3968022e-01 -1.1036953e-01 8.8799204e-03 9.8416932e-02 -1.4135157e-01 9.1845824e-02 1.7724796e-02 -1.1570384e-01 1.3801547e-01 -7.0068437e-02 -4.3701602e-02 1.2889189e-01 -1.2979010e-01 4.5808841e-02 6.8130258e-02 -1.3751387e-01 1.1696686e-01 -1.9926443e-02 -9.0145365e-02 1.4126436e-01 -1.0000000e-01 -6.6618581e-03 1.0896703e-01 -1.4001049e-01 7.9490594e-02 3.3014160e-02 -1.2392848e-01 1.3379667e-01 -5.6165193e-02 -5.8196919e-02 1.3449970e-01 -1.2284305e-01 3.0850113e-02 8.1318023e-02 -1.4030621e-01 1.0753764e-01 -4.4421521e-03 -1.0155839e-01 1.4114229e-01 -8.8422664e-02 -2.2123174e-02 1.1820101e-01 
-1.3697834e-01 6.6175269e-02 4.7904776e-02 -1.3065630e-01 1.2796187e-01 -4.1583582e-02 -7.1989327e-02 1.3848302e-01 -1.1441228e-01 1.5518775e-02 9.3523621e-02 -1.4140391e-01 9.6809580e-02 1.1095792e-02 -1.1174479e-01 1.3931550e-01 -7.5777352e-02 -3.7317285e-02 1.2600735e-01 -1.3229176e-01 5.2060674e-02 6.2216794e-02 -1.3580604e-01 1.2058153e-01 -2.6499720e-02 -8.4912243e-02 1.4079372e-01 -1.0459963e-01 1.0000000e-01 -9.8416932e-02 -4.4421521e-03 1.0459963e-01 -1.4114229e-01 9.1845824e-02 1.3308925e-02 -1.1036953e-01 1.4030621e-01 -8.4912243e-02 -2.2123174e-02 1.1570384e-01 -1.3891640e-01 7.7643552e-02 3.0850113e-02 -1.2058153e-01 1.3697834e-01 -7.0068437e-02 -3.9455301e-02 1.2498333e-01 -1.3449970e-01 6.2216794e-02 4.7904776e-02 -1.2889189e-01 1.3149025e-01 -5.4119610e-02 -5.6165193e-02 1.3229176e-01 -1.2796187e-01 4.5808841e-02 6.4203952e-02 -1.3516954e-01 1.2392848e-01 -3.7317285e-02 -7.1989327e-02 1.3751387e-01 -1.1940600e-01 2.8678454e-02 7.9490594e-02 -1.3931550e-01 1.1441228e-01 -1.9926443e-02 -8.6678147e-02 1.4056731e-01 -1.0896703e-01 1.1095792e-02 9.3523621e-02 -1.4126436e-01 1.0309173e-01 -2.2213501e-03 -1.0000000e-01 1.4140391e-01 -9.6809580e-02 -6.6618581e-03 1.0608172e-01 -1.4098540e-01 9.0145365e-02 1.5518775e-02 -1.1174479e-01 1.4001049e-01 -8.3125388e-02 -2.4314447e-02 1.1696686e-01 -1.3848302e-01 7.5777352e-02 3.3014160e-02 -1.2172730e-01 1.3640902e-01 -6.8130258e-02 -4.1583582e-02 1.2600735e-01 -1.3379667e-01 6.0214285e-02 4.9988892e-02 -1.2979010e-01 1.3065630e-01 -5.2060674e-02 -5.8196919e-02 1.3306063e-01 -1.2700028e-01 4.3701602e-02 6.6175269e-02 -1.3580604e-01 1.2284305e-01 -3.5170061e-02 -7.3892456e-02 1.3801547e-01 -1.1820101e-01 2.6499720e-02 8.1318023e-02 -1.3968022e-01 1.1309249e-01 -1.7724796e-02 -8.8422664e-02 1.4079372e-01 -1.0753764e-01 8.8799204e-03 9.5178342e-02 -1.4135157e-01 1.0155839e-01 1.0000000e-01 -1.0155839e-01 4.4421521e-03 9.5178342e-02 -1.4114229e-01 1.0753764e-01 -1.3308925e-02 -8.8422664e-02 1.4030621e-01 -1.1309249e-01 2.2123174e-02 8.1318023e-02 -1.3891640e-01 1.1820101e-01 -3.0850113e-02 -7.3892456e-02 1.3697834e-01 -1.2284305e-01 3.9455301e-02 6.6175269e-02 -1.3449970e-01 1.2700028e-01 -4.7904776e-02 -5.8196919e-02 1.3149025e-01 -1.3065630e-01 5.6165193e-02 4.9988892e-02 -1.2796187e-01 1.3379667e-01 -6.4203952e-02 -4.1583582e-02 1.2392848e-01 -1.3640902e-01 7.1989327e-02 3.3014160e-02 -1.1940600e-01 1.3848302e-01 -7.9490594e-02 -2.4314447e-02 1.1441228e-01 -1.4001049e-01 8.6678147e-02 1.5518775e-02 -1.0896703e-01 1.4098540e-01 -9.3523621e-02 -6.6618581e-03 1.0309173e-01 -1.4140391e-01 1.0000000e-01 -2.2213501e-03 -9.6809580e-02 1.4126436e-01 -1.0608172e-01 1.1095792e-02 9.0145365e-02 -1.4056731e-01 1.1174479e-01 -1.9926443e-02 -8.3125388e-02 1.3931550e-01 -1.1696686e-01 2.8678454e-02 7.5777352e-02 -1.3751387e-01 1.2172730e-01 -3.7317285e-02 -6.8130258e-02 1.3516954e-01 -1.2600735e-01 4.5808841e-02 6.0214285e-02 -1.3229176e-01 1.2979010e-01 -5.4119610e-02 -5.2060674e-02 1.2889189e-01 -1.3306063e-01 6.2216794e-02 4.3701602e-02 -1.2498333e-01 1.3580604e-01 -7.0068437e-02 -3.5170061e-02 1.2058153e-01 -1.3801547e-01 7.7643552e-02 2.6499720e-02 -1.1570384e-01 1.3968022e-01 -8.4912243e-02 -1.7724796e-02 1.1036953e-01 -1.4079372e-01 9.1845824e-02 8.8799204e-03 -1.0459963e-01 1.4135157e-01 -9.8416932e-02 1.0000000e-01 -1.0459963e-01 1.3308925e-02 8.4912243e-02 -1.3891640e-01 1.2058153e-01 -3.9455301e-02 -6.2216794e-02 1.3149025e-01 -1.3229176e-01 6.4203952e-02 3.7317285e-02 -1.1940600e-01 1.3931550e-01 -8.6678147e-02 -1.1095792e-02 1.0309173e-01 
-1.4140391e-01 1.0608172e-01 -1.5518775e-02 -8.3125388e-02 1.3848302e-01 -1.2172730e-01 4.1583582e-02 6.0214285e-02 -1.3065630e-01 1.3306063e-01 -6.6175269e-02 -3.5170061e-02 1.1820101e-01 -1.3968022e-01 8.8422664e-02 8.8799204e-03 -1.0155839e-01 1.4135157e-01 -1.0753764e-01 1.7724796e-02 8.1318023e-02 -1.3801547e-01 1.2284305e-01 -4.3701602e-02 -5.8196919e-02 1.2979010e-01 -1.3379667e-01 6.8130258e-02 3.3014160e-02 -1.1696686e-01 1.4001049e-01 -9.0145365e-02 -6.6618581e-03 1.0000000e-01 -1.4126436e-01 1.0896703e-01 -1.9926443e-02 -7.9490594e-02 1.3751387e-01 -1.2392848e-01 4.5808841e-02 5.6165193e-02 -1.2889189e-01 1.3449970e-01 -7.0068437e-02 -3.0850113e-02 1.1570384e-01 -1.4030621e-01 9.1845824e-02 4.4421521e-03 -9.8416932e-02 1.4114229e-01 -1.1036953e-01 2.2123174e-02 7.7643552e-02 -1.3697834e-01 1.2498333e-01 -4.7904776e-02 -5.4119610e-02 1.2796187e-01 -1.3516954e-01 7.1989327e-02 2.8678454e-02 -1.1441228e-01 1.4056731e-01 -9.3523621e-02 -2.2213501e-03 9.6809580e-02 -1.4098540e-01 1.1174479e-01 -2.4314447e-02 -7.5777352e-02 1.3640902e-01 -1.2600735e-01 4.9988892e-02 5.2060674e-02 -1.2700028e-01 1.3580604e-01 -7.3892456e-02 -2.6499720e-02 1.1309249e-01 -1.4079372e-01 9.5178342e-02 1.0000000e-01 -1.0753764e-01 2.2123174e-02 7.3892456e-02 -1.3449970e-01 1.3065630e-01 -6.4203952e-02 -3.3014160e-02 1.1441228e-01 -1.4098540e-01 1.0000000e-01 -1.1095792e-02 -8.3125388e-02 1.3751387e-01 -1.2600735e-01 5.4119610e-02 4.3701602e-02 -1.2058153e-01 1.3968022e-01 -9.1845824e-02 -5.1966117e-16 9.1845824e-02 -1.3968022e-01 1.2058153e-01 -4.3701602e-02 -5.4119610e-02 1.2600735e-01 -1.3751387e-01 8.3125388e-02 1.1095792e-02 -1.0000000e-01 1.4098540e-01 -1.1441228e-01 3.3014160e-02 6.4203952e-02 -1.3065630e-01 1.3449970e-01 -7.3892456e-02 -2.2123174e-02 1.0753764e-01 -1.4142136e-01 1.0753764e-01 -2.2123174e-02 -7.3892456e-02 1.3449970e-01 -1.3065630e-01 6.4203952e-02 3.3014160e-02 -1.1441228e-01 1.4098540e-01 -1.0000000e-01 1.1095792e-02 8.3125388e-02 -1.3751387e-01 1.2600735e-01 -5.4119610e-02 -4.3701602e-02 1.2058153e-01 -1.3968022e-01 9.1845824e-02 2.5638427e-15 -9.1845824e-02 1.3968022e-01 -1.2058153e-01 4.3701602e-02 5.4119610e-02 -1.2600735e-01 1.3751387e-01 -8.3125388e-02 -1.1095792e-02 1.0000000e-01 -1.4098540e-01 1.1441228e-01 -3.3014160e-02 -6.4203952e-02 1.3065630e-01 -1.3449970e-01 7.3892456e-02 2.2123174e-02 -1.0753764e-01 1.4142136e-01 -1.0753764e-01 2.2123174e-02 7.3892456e-02 -1.3449970e-01 1.3065630e-01 -6.4203952e-02 -3.3014160e-02 1.1441228e-01 -1.4098540e-01 1.0000000e-01 -1.1095792e-02 -8.3125388e-02 1.3751387e-01 -1.2600735e-01 5.4119610e-02 4.3701602e-02 -1.2058153e-01 1.3968022e-01 -9.1845824e-02 1.0000000e-01 -1.1036953e-01 3.0850113e-02 6.2216794e-02 -1.2796187e-01 1.3751387e-01 -8.6678147e-02 -2.2213501e-03 9.0145365e-02 -1.3848302e-01 1.2600735e-01 -5.8196919e-02 -3.5170061e-02 1.1309249e-01 -1.4135157e-01 1.0753764e-01 -2.6499720e-02 -6.6175269e-02 1.2979010e-01 -1.3640902e-01 8.3125388e-02 6.6618581e-03 -9.3523621e-02 1.3931550e-01 -1.2392848e-01 5.4119610e-02 3.9455301e-02 -1.1570384e-01 1.4114229e-01 -1.0459963e-01 2.2123174e-02 7.0068437e-02 -1.3149025e-01 1.3516954e-01 -7.9490594e-02 -1.1095792e-02 9.6809580e-02 -1.4001049e-01 1.2172730e-01 -4.9988892e-02 -4.3701602e-02 1.1820101e-01 -1.4079372e-01 1.0155839e-01 -1.7724796e-02 -7.3892456e-02 1.3306063e-01 -1.3379667e-01 7.5777352e-02 1.5518775e-02 -1.0000000e-01 1.4056731e-01 -1.1940600e-01 4.5808841e-02 4.7904776e-02 -1.2058153e-01 1.4030621e-01 -9.8416932e-02 1.3308925e-02 7.7643552e-02 -1.3449970e-01 1.3229176e-01 
-7.1989327e-02 -1.9926443e-02 1.0309173e-01 -1.4098540e-01 1.1696686e-01 -4.1583582e-02 -5.2060674e-02 1.2284305e-01 -1.3968022e-01 9.5178342e-02 -8.8799204e-03 -8.1318023e-02 1.3580604e-01 -1.3065630e-01 6.8130258e-02 2.4314447e-02 -1.0608172e-01 1.4126436e-01 -1.1441228e-01 3.7317285e-02 5.6165193e-02 -1.2498333e-01 1.3891640e-01 -9.1845824e-02 4.4421521e-03 8.4912243e-02 -1.3697834e-01 1.2889189e-01 -6.4203952e-02 -2.8678454e-02 1.0896703e-01 -1.4140391e-01 1.1174479e-01 -3.3014160e-02 -6.0214285e-02 1.2700028e-01 -1.3801547e-01 8.8422664e-02 1.0000000e-01 -1.1309249e-01 3.9455301e-02 4.9988892e-02 -1.1940600e-01 1.4098540e-01 -1.0608172e-01 2.8678454e-02 6.0214285e-02 -1.2498333e-01 1.3968022e-01 -9.8416932e-02 1.7724796e-02 7.0068437e-02 -1.2979010e-01 1.3751387e-01 -9.0145365e-02 6.6618581e-03 7.9490594e-02 -1.3379667e-01 1.3449970e-01 -8.1318023e-02 -4.4421521e-03 8.8422664e-02 -1.3697834e-01 1.3065630e-01 -7.1989327e-02 -1.5518775e-02 9.6809580e-02 -1.3931550e-01 1.2600735e-01 -6.2216794e-02 -2.6499720e-02 1.0459963e-01 -1.4079372e-01 1.2058153e-01 -5.2060674e-02 -3.7317285e-02 1.1174479e-01 -1.4140391e-01 1.1441228e-01 -4.1583582e-02 -4.7904776e-02 1.1820101e-01 -1.4114229e-01 1.0753764e-01 -3.0850113e-02 -5.8196919e-02 1.2392848e-01 -1.4001049e-01 1.0000000e-01 -1.9926443e-02 -6.8130258e-02 1.2889189e-01 -1.3801547e-01 9.1845824e-02 -8.8799204e-03 -7.7643552e-02 1.3306063e-01 -1.3516954e-01 8.3125388e-02 2.2213501e-03 -8.6678147e-02 1.3640902e-01 -1.3149025e-01 7.3892456e-02 1.3308925e-02 -9.5178342e-02 1.3891640e-01 -1.2700028e-01 6.4203952e-02 2.4314447e-02 -1.0309173e-01 1.4056731e-01 -1.2172730e-01 5.4119610e-02 3.5170061e-02 -1.1036953e-01 1.4135157e-01 -1.1570384e-01 4.3701602e-02 4.5808841e-02 -1.1696686e-01 1.4126436e-01 -1.0896703e-01 3.3014160e-02 5.6165193e-02 -1.2284305e-01 1.4030621e-01 -1.0155839e-01 2.2123174e-02 6.6175269e-02 -1.2796187e-01 1.3848302e-01 -9.3523621e-02 1.1095792e-02 7.5777352e-02 -1.3229176e-01 1.3580604e-01 -8.4912243e-02 1.0000000e-01 -1.1570384e-01 4.7904776e-02 3.7317285e-02 -1.0896703e-01 1.4098540e-01 -1.2172730e-01 5.8196919e-02 2.6499720e-02 -1.0155839e-01 1.3968022e-01 -1.2700028e-01 6.8130258e-02 1.5518775e-02 -9.3523621e-02 1.3751387e-01 -1.3149025e-01 7.7643552e-02 4.4421521e-03 -8.4912243e-02 1.3449970e-01 -1.3516954e-01 8.6678147e-02 -6.6618581e-03 -7.5777352e-02 1.3065630e-01 -1.3801547e-01 9.5178342e-02 -1.7724796e-02 -6.6175269e-02 1.2600735e-01 -1.4001049e-01 1.0309173e-01 -2.8678454e-02 -5.6165193e-02 1.2058153e-01 -1.4114229e-01 1.1036953e-01 -3.9455301e-02 -4.5808841e-02 1.1441228e-01 -1.4140391e-01 1.1696686e-01 -4.9988892e-02 -3.5170061e-02 1.0753764e-01 -1.4079372e-01 1.2284305e-01 -6.0214285e-02 -2.4314447e-02 1.0000000e-01 -1.3931550e-01 1.2796187e-01 -7.0068437e-02 -1.3308925e-02 9.1845824e-02 -1.3697834e-01 1.3229176e-01 -7.9490594e-02 -2.2213501e-03 8.3125388e-02 -1.3379667e-01 1.3580604e-01 -8.8422664e-02 8.8799204e-03 7.3892456e-02 -1.2979010e-01 1.3848302e-01 -9.6809580e-02 1.9926443e-02 6.4203952e-02 -1.2498333e-01 1.4030621e-01 -1.0459963e-01 3.0850113e-02 5.4119610e-02 -1.1940600e-01 1.4126436e-01 -1.1174479e-01 4.1583582e-02 4.3701602e-02 -1.1309249e-01 1.4135157e-01 -1.1820101e-01 5.2060674e-02 3.3014160e-02 -1.0608172e-01 1.4056731e-01 -1.2392848e-01 6.2216794e-02 2.2123174e-02 -9.8416932e-02 1.3891640e-01 -1.2889189e-01 7.1989327e-02 1.1095792e-02 -9.0145365e-02 1.3640902e-01 -1.3306063e-01 8.1318023e-02 1.0000000e-01 -1.1820101e-01 5.6165193e-02 2.4314447e-02 -9.6809580e-02 1.3751387e-01 -1.3306063e-01 
8.4912243e-02 -8.8799204e-03 -7.0068437e-02 1.2600735e-01 -1.4056731e-01 1.0896703e-01 -4.1583582e-02 -3.9455301e-02 1.0753764e-01 -1.4030621e-01 1.2700028e-01 -7.1989327e-02 -6.6618581e-03 8.3125388e-02 -1.3229176e-01 1.3801547e-01 -9.8416932e-02 2.6499720e-02 5.4119610e-02 -1.1696686e-01 1.4140391e-01 -1.1940600e-01 5.8196919e-02 2.2123174e-02 -9.5178342e-02 1.3697834e-01 -1.3379667e-01 8.6678147e-02 -1.1095792e-02 -6.8130258e-02 1.2498333e-01 -1.4079372e-01 1.1036953e-01 -4.3701602e-02 -3.7317285e-02 1.0608172e-01 -1.4001049e-01 1.2796187e-01 -7.3892456e-02 -4.4421521e-03 8.1318023e-02 -1.3149025e-01 1.3848302e-01 -1.0000000e-01 2.8678454e-02 5.2060674e-02 -1.1570384e-01 1.4135157e-01 -1.2058153e-01 6.0214285e-02 1.9926443e-02 -9.3523621e-02 1.3640902e-01 -1.3449970e-01 8.8422664e-02 -1.3308925e-02 -6.6175269e-02 1.2392848e-01 -1.4098540e-01 1.1174479e-01 -4.5808841e-02 -3.5170061e-02 1.0459963e-01 -1.3968022e-01 1.2889189e-01 -7.5777352e-02 -2.2213501e-03 7.9490594e-02 -1.3065630e-01 1.3891640e-01 -1.0155839e-01 3.0850113e-02 4.9988892e-02 -1.1441228e-01 1.4126436e-01 -1.2172730e-01 6.2216794e-02 1.7724796e-02 -9.1845824e-02 1.3580604e-01 -1.3516954e-01 9.0145365e-02 -1.5518775e-02 -6.4203952e-02 1.2284305e-01 -1.4114229e-01 1.1309249e-01 -4.7904776e-02 -3.3014160e-02 1.0309173e-01 -1.3931550e-01 1.2979010e-01 -7.7643552e-02 1.0000000e-01 -1.2058153e-01 6.4203952e-02 1.1095792e-02 -8.3125388e-02 1.3065630e-01 -1.3968022e-01 1.0753764e-01 -4.3701602e-02 -3.3014160e-02 1.0000000e-01 -1.3751387e-01 1.3449970e-01 -9.1845824e-02 2.2123174e-02 5.4119610e-02 -1.1441228e-01 1.4098540e-01 -1.2600735e-01 7.3892456e-02 1.0394099e-15 -7.3892456e-02 1.2600735e-01 -1.4098540e-01 1.1441228e-01 -5.4119610e-02 -2.2123174e-02 9.1845824e-02 -1.3449970e-01 1.3751387e-01 -1.0000000e-01 3.3014160e-02 4.3701602e-02 -1.0753764e-01 1.3968022e-01 -1.3065630e-01 8.3125388e-02 -1.1095792e-02 -6.4203952e-02 1.2058153e-01 -1.4142136e-01 1.2058153e-01 -6.4203952e-02 -1.1095792e-02 8.3125388e-02 -1.3065630e-01 1.3968022e-01 -1.0753764e-01 4.3701602e-02 3.3014160e-02 -1.0000000e-01 1.3751387e-01 -1.3449970e-01 9.1845824e-02 -2.2123174e-02 -5.4119610e-02 1.1441228e-01 -1.4098540e-01 1.2600735e-01 -7.3892456e-02 -3.1182296e-15 7.3892456e-02 -1.2600735e-01 1.4098540e-01 -1.1441228e-01 5.4119610e-02 2.2123174e-02 -9.1845824e-02 1.3449970e-01 -1.3751387e-01 1.0000000e-01 -3.3014160e-02 -4.3701602e-02 1.0753764e-01 -1.3968022e-01 1.3065630e-01 -8.3125388e-02 1.1095792e-02 6.4203952e-02 -1.2058153e-01 1.4142136e-01 -1.2058153e-01 6.4203952e-02 1.1095792e-02 -8.3125388e-02 1.3065630e-01 -1.3968022e-01 1.0753764e-01 -4.3701602e-02 -3.3014160e-02 1.0000000e-01 -1.3751387e-01 1.3449970e-01 -9.1845824e-02 2.2123174e-02 5.4119610e-02 -1.1441228e-01 1.4098540e-01 -1.2600735e-01 7.3892456e-02 1.0000000e-01 -1.2284305e-01 7.1989327e-02 -2.2213501e-03 -6.8130258e-02 1.2058153e-01 -1.4135157e-01 1.2498333e-01 -7.5777352e-02 6.6618581e-03 6.4203952e-02 -1.1820101e-01 1.4114229e-01 -1.2700028e-01 7.9490594e-02 -1.1095792e-02 -6.0214285e-02 1.1570384e-01 -1.4079372e-01 1.2889189e-01 -8.3125388e-02 1.5518775e-02 5.6165193e-02 -1.1309249e-01 1.4030621e-01 -1.3065630e-01 8.6678147e-02 -1.9926443e-02 -5.2060674e-02 1.1036953e-01 -1.3968022e-01 1.3229176e-01 -9.0145365e-02 2.4314447e-02 4.7904776e-02 -1.0753764e-01 1.3891640e-01 -1.3379667e-01 9.3523621e-02 -2.8678454e-02 -4.3701602e-02 1.0459963e-01 -1.3801547e-01 1.3516954e-01 -9.6809580e-02 3.3014160e-02 3.9455301e-02 -1.0155839e-01 1.3697834e-01 -1.3640902e-01 1.0000000e-01 -3.7317285e-02 
-3.5170061e-02 9.8416932e-02 -1.3580604e-01 1.3751387e-01 -1.0309173e-01 4.1583582e-02 3.0850113e-02 -9.5178342e-02 1.3449970e-01 -1.3848302e-01 1.0608172e-01 -4.5808841e-02 -2.6499720e-02 9.1845824e-02 -1.3306063e-01 1.3931550e-01 -1.0896703e-01 4.9988892e-02 2.2123174e-02 -8.8422664e-02 1.3149025e-01 -1.4001049e-01 1.1174479e-01 -5.4119610e-02 -1.7724796e-02 8.4912243e-02 -1.2979010e-01 1.4056731e-01 -1.1441228e-01 5.8196919e-02 1.3308925e-02 -8.1318023e-02 1.2796187e-01 -1.4098540e-01 1.1696686e-01 -6.2216794e-02 -8.8799204e-03 7.7643552e-02 -1.2600735e-01 1.4126436e-01 -1.1940600e-01 6.6175269e-02 4.4421521e-03 -7.3892456e-02 1.2392848e-01 -1.4140391e-01 1.2172730e-01 -7.0068437e-02 1.0000000e-01 -1.2498333e-01 7.9490594e-02 -1.5518775e-02 -5.2060674e-02 1.0753764e-01 -1.3801547e-01 1.3640902e-01 -1.0309173e-01 4.5808841e-02 2.2123174e-02 -8.4912243e-02 1.2796187e-01 -1.4126436e-01 1.2172730e-01 -7.3892456e-02 8.8799204e-03 5.8196919e-02 -1.1174479e-01 1.3931550e-01 -1.3449970e-01 9.8416932e-02 -3.9455301e-02 -2.8678454e-02 9.0145365e-02 -1.3065630e-01 1.4079372e-01 -1.1820101e-01 6.8130258e-02 -2.2213501e-03 -6.4203952e-02 1.1570384e-01 -1.4030621e-01 1.3229176e-01 -9.3523621e-02 3.3014160e-02 3.5170061e-02 -9.5178342e-02 1.3306063e-01 -1.4001049e-01 1.1441228e-01 -6.2216794e-02 -4.4421521e-03 7.0068437e-02 -1.1940600e-01 1.4098540e-01 -1.2979010e-01 8.8422664e-02 -2.6499720e-02 -4.1583582e-02 1.0000000e-01 -1.3516954e-01 1.3891640e-01 -1.1036953e-01 5.6165193e-02 1.1095792e-02 -7.5777352e-02 1.2284305e-01 -1.4135157e-01 1.2700028e-01 -8.3125388e-02 1.9926443e-02 4.7904776e-02 -1.0459963e-01 1.3697834e-01 -1.3751387e-01 1.0608172e-01 -4.9988892e-02 -1.7724796e-02 8.1318023e-02 -1.2600735e-01 1.4140391e-01 -1.2392848e-01 7.7643552e-02 -1.3308925e-02 -5.4119610e-02 1.0896703e-01 -1.3848302e-01 1.3580604e-01 -1.0155839e-01 4.3701602e-02 2.4314447e-02 -8.6678147e-02 1.2889189e-01 -1.4114229e-01 1.2058153e-01 -7.1989327e-02 6.6618581e-03 6.0214285e-02 -1.1309249e-01 1.3968022e-01 -1.3379667e-01 9.6809580e-02 -3.7317285e-02 -3.0850113e-02 9.1845824e-02 -1.3149025e-01 1.4056731e-01 -1.1696686e-01 6.6175269e-02 1.0000000e-01 -1.2700028e-01 8.6678147e-02 -2.8678454e-02 -3.5170061e-02 9.1845824e-02 -1.2979010e-01 1.4126436e-01 -1.2392848e-01 8.1318023e-02 -2.2123174e-02 -4.1583582e-02 9.6809580e-02 -1.3229176e-01 1.4079372e-01 -1.2058153e-01 7.5777352e-02 -1.5518775e-02 -4.7904776e-02 1.0155839e-01 -1.3449970e-01 1.4001049e-01 -1.1696686e-01 7.0068437e-02 -8.8799204e-03 -5.4119610e-02 1.0608172e-01 -1.3640902e-01 1.3891640e-01 -1.1309249e-01 6.4203952e-02 -2.2213501e-03 -6.0214285e-02 1.1036953e-01 -1.3801547e-01 1.3751387e-01 -1.0896703e-01 5.8196919e-02 4.4421521e-03 -6.6175269e-02 1.1441228e-01 -1.3931550e-01 1.3580604e-01 -1.0459963e-01 5.2060674e-02 1.1095792e-02 -7.1989327e-02 1.1820101e-01 -1.4030621e-01 1.3379667e-01 -1.0000000e-01 4.5808841e-02 1.7724796e-02 -7.7643552e-02 1.2172730e-01 -1.4098540e-01 1.3149025e-01 -9.5178342e-02 3.9455301e-02 2.4314447e-02 -8.3125388e-02 1.2498333e-01 -1.4135157e-01 1.2889189e-01 -9.0145365e-02 3.3014160e-02 3.0850113e-02 -8.8422664e-02 1.2796187e-01 -1.4140391e-01 1.2600735e-01 -8.4912243e-02 2.6499720e-02 3.7317285e-02 -9.3523621e-02 1.3065630e-01 -1.4114229e-01 1.2284305e-01 -7.9490594e-02 1.9926443e-02 4.3701602e-02 -9.8416932e-02 1.3306063e-01 -1.4056731e-01 1.1940600e-01 -7.3892456e-02 1.3308925e-02 4.9988892e-02 -1.0309173e-01 1.3516954e-01 -1.3968022e-01 1.1570384e-01 -6.8130258e-02 6.6618581e-03 5.6165193e-02 -1.0753764e-01 1.3697834e-01 
-1.3848302e-01 1.1174479e-01 -6.2216794e-02 1.0000000e-01 -1.2889189e-01 9.3523621e-02 -4.1583582e-02 -1.7724796e-02 7.3892456e-02 -1.1696686e-01 1.3931550e-01 -1.3697834e-01 1.1036953e-01 -6.4203952e-02 6.6618581e-03 5.2060674e-02 -1.0155839e-01 1.3306063e-01 -1.4098540e-01 1.2392848e-01 -8.4912243e-02 3.0850113e-02 2.8678454e-02 -8.3125388e-02 1.2284305e-01 -1.4079372e-01 1.3379667e-01 -1.0309173e-01 5.4119610e-02 4.4421521e-03 -6.2216794e-02 1.0896703e-01 -1.3640902e-01 1.3968022e-01 -1.1820101e-01 7.5777352e-02 -1.9926443e-02 -3.9455301e-02 9.1845824e-02 -1.2796187e-01 1.4140391e-01 -1.2979010e-01 9.5178342e-02 -4.3701602e-02 -1.5518775e-02 7.1989327e-02 -1.1570384e-01 1.3891640e-01 -1.3751387e-01 1.1174479e-01 -6.6175269e-02 8.8799204e-03 4.9988892e-02 -1.0000000e-01 1.3229176e-01 -1.4114229e-01 1.2498333e-01 -8.6678147e-02 3.3014160e-02 2.6499720e-02 -8.1318023e-02 1.2172730e-01 -1.4056731e-01 1.3449970e-01 -1.0459963e-01 5.6165193e-02 2.2213501e-03 -6.0214285e-02 1.0753764e-01 -1.3580604e-01 1.4001049e-01 -1.1940600e-01 7.7643552e-02 -2.2123174e-02 -3.7317285e-02 9.0145365e-02 -1.2700028e-01 1.4135157e-01 -1.3065630e-01 9.6809580e-02 -4.5808841e-02 -1.3308925e-02 7.0068437e-02 -1.1441228e-01 1.3848302e-01 -1.3801547e-01 1.1309249e-01 -6.8130258e-02 1.1095792e-02 4.7904776e-02 -9.8416932e-02 1.3149025e-01 -1.4126436e-01 1.2600735e-01 -8.8422664e-02 3.5170061e-02 2.4314447e-02 -7.9490594e-02 1.2058153e-01 -1.4030621e-01 1.3516954e-01 -1.0608172e-01 5.8196919e-02 1.0000000e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -3.1183172e-16 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 9.3549515e-16 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -5.5429941e-16 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 1.1779628e-15 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.8016263e-15 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 2.4252897e-15 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -1.0392348e-15 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 3.6726166e-15 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -2.2865617e-15 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 9.0050676e-16 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 -3.5338885e-15 5.4119610e-02 -1.0000000e-01 1.3065630e-01 -1.4142136e-01 1.3065630e-01 -1.0000000e-01 5.4119610e-02 6.1672703e-15 -5.4119610e-02 1.0000000e-01 -1.3065630e-01 1.4142136e-01 -1.3065630e-01 1.0000000e-01 -5.4119610e-02 1.0000000e-01 -1.3229176e-01 1.0608172e-01 -6.6175269e-02 1.7724796e-02 3.3014160e-02 -7.9490594e-02 1.1570384e-01 -1.3697834e-01 1.4056731e-01 -1.2600735e-01 9.5178342e-02 -5.2060674e-02 2.2213501e-03 4.7904776e-02 -9.1845824e-02 1.2392848e-01 -1.4001049e-01 1.3801547e-01 -1.1820101e-01 8.3125388e-02 -3.7317285e-02 -1.3308925e-02 6.2216794e-02 -1.0309173e-01 1.3065630e-01 -1.4135157e-01 1.3379667e-01 -1.0896703e-01 7.0068437e-02 -2.2123174e-02 -2.8678454e-02 7.5777352e-02 -1.1309249e-01 1.3580604e-01 -1.4098540e-01 1.2796187e-01 -9.8416932e-02 5.6165193e-02 -6.6618581e-03 -4.3701602e-02 8.8422664e-02 
-1.2172730e-01 1.3931550e-01 -1.3891640e-01 1.2058153e-01 -8.6678147e-02 4.1583582e-02 8.8799204e-03 -5.8196919e-02 1.0000000e-01 -1.2889189e-01 1.4114229e-01 -1.3516954e-01 1.1174479e-01 -7.3892456e-02 2.6499720e-02 2.4314447e-02 -7.1989327e-02 1.1036953e-01 -1.3449970e-01 1.4126436e-01 -1.2979010e-01 1.0155839e-01 -6.0214285e-02 1.1095792e-02 3.9455301e-02 -8.4912243e-02 1.1940600e-01 -1.3848302e-01 1.3968022e-01 -1.2284305e-01 9.0145365e-02 -4.5808841e-02 -4.4421521e-03 5.4119610e-02 -9.6809580e-02 1.2700028e-01 -1.4079372e-01 1.3640902e-01 -1.1441228e-01 7.7643552e-02 -3.0850113e-02 -1.9926443e-02 6.8130258e-02 -1.0753764e-01 1.3306063e-01 -1.4140391e-01 1.3149025e-01 -1.0459963e-01 6.4203952e-02 -1.5518775e-02 -3.5170061e-02 8.1318023e-02 -1.1696686e-01 1.3751387e-01 -1.4030621e-01 1.2498333e-01 -9.3523621e-02 4.9988892e-02 1.0000000e-01 -1.3379667e-01 1.1174479e-01 -7.7643552e-02 3.5170061e-02 1.1095792e-02 -5.6165193e-02 9.5178342e-02 -1.2392848e-01 1.3931550e-01 -1.3968022e-01 1.2498333e-01 -9.6809580e-02 5.8196919e-02 -1.3308925e-02 -3.3014160e-02 7.5777352e-02 -1.1036953e-01 1.3306063e-01 -1.4140391e-01 1.3449970e-01 -1.1309249e-01 7.9490594e-02 -3.7317285e-02 -8.8799204e-03 5.4119610e-02 -9.3523621e-02 1.2284305e-01 -1.3891640e-01 1.4001049e-01 -1.2600735e-01 9.8416932e-02 -6.0214285e-02 1.5518775e-02 3.0850113e-02 -7.3892456e-02 1.0896703e-01 -1.3229176e-01 1.4135157e-01 -1.3516954e-01 1.1441228e-01 -8.1318023e-02 3.9455301e-02 6.6618581e-03 -5.2060674e-02 9.1845824e-02 -1.2172730e-01 1.3848302e-01 -1.4030621e-01 1.2700028e-01 -1.0000000e-01 6.2216794e-02 -1.7724796e-02 -2.8678454e-02 7.1989327e-02 -1.0753764e-01 1.3149025e-01 -1.4126436e-01 1.3580604e-01 -1.1570384e-01 8.3125388e-02 -4.1583582e-02 -4.4421521e-03 4.9988892e-02 -9.0145365e-02 1.2058153e-01 -1.3801547e-01 1.4056731e-01 -1.2796187e-01 1.0155839e-01 -6.4203952e-02 1.9926443e-02 2.6499720e-02 -7.0068437e-02 1.0608172e-01 -1.3065630e-01 1.4114229e-01 -1.3640902e-01 1.1696686e-01 -8.4912243e-02 4.3701602e-02 2.2213501e-03 -4.7904776e-02 8.8422664e-02 -1.1940600e-01 1.3751387e-01 -1.4079372e-01 1.2889189e-01 -1.0309173e-01 6.6175269e-02 -2.2123174e-02 -2.4314447e-02 6.8130258e-02 -1.0459963e-01 1.2979010e-01 -1.4098540e-01 1.3697834e-01 -1.1820101e-01 8.6678147e-02 -4.5808841e-02 1.0000000e-01 -1.3516954e-01 1.1696686e-01 -8.8422664e-02 5.2060674e-02 -1.1095792e-02 -3.0850113e-02 7.0068437e-02 -1.0309173e-01 1.2700028e-01 -1.3968022e-01 1.4001049e-01 -1.2796187e-01 1.0459963e-01 -7.1989327e-02 3.3014160e-02 8.8799204e-03 -4.9988892e-02 8.6678147e-02 -1.1570384e-01 1.3449970e-01 -1.4140391e-01 1.3580604e-01 -1.1820101e-01 9.0145365e-02 -5.4119610e-02 1.3308925e-02 2.8678454e-02 -6.8130258e-02 1.0155839e-01 -1.2600735e-01 1.3931550e-01 -1.4030621e-01 1.2889189e-01 -1.0608172e-01 7.3892456e-02 -3.5170061e-02 -6.6618581e-03 4.7904776e-02 -8.4912243e-02 1.1441228e-01 -1.3379667e-01 1.4135157e-01 -1.3640902e-01 1.1940600e-01 -9.1845824e-02 5.6165193e-02 -1.5518775e-02 -2.6499720e-02 6.6175269e-02 -1.0000000e-01 1.2498333e-01 -1.3891640e-01 1.4056731e-01 -1.2979010e-01 1.0753764e-01 -7.5777352e-02 3.7317285e-02 4.4421521e-03 -4.5808841e-02 8.3125388e-02 -1.1309249e-01 1.3306063e-01 -1.4126436e-01 1.3697834e-01 -1.2058153e-01 9.3523621e-02 -5.8196919e-02 1.7724796e-02 2.4314447e-02 -6.4203952e-02 9.8416932e-02 -1.2392848e-01 1.3848302e-01 -1.4079372e-01 1.3065630e-01 -1.0896703e-01 7.7643552e-02 -3.9455301e-02 -2.2213501e-03 4.3701602e-02 -8.1318023e-02 1.1174479e-01 -1.3229176e-01 1.4114229e-01 -1.3751387e-01 1.2172730e-01 
-9.5178342e-02 6.0214285e-02 -1.9926443e-02 -2.2123174e-02 6.2216794e-02 -9.6809580e-02 1.2284305e-01 -1.3801547e-01 1.4098540e-01 -1.3149025e-01 1.1036953e-01 -7.9490594e-02 4.1583582e-02 1.0000000e-01 -1.3640902e-01 1.2172730e-01 -9.8416932e-02 6.8130258e-02 -3.3014160e-02 -4.4421521e-03 4.1583582e-02 -7.5777352e-02 1.0459963e-01 -1.2600735e-01 1.3848302e-01 -1.4114229e-01 1.3379667e-01 -1.1696686e-01 9.1845824e-02 -6.0214285e-02 2.4314447e-02 1.3308925e-02 -4.9988892e-02 8.3125388e-02 -1.1036953e-01 1.2979010e-01 -1.4001049e-01 1.4030621e-01 -1.3065630e-01 1.1174479e-01 -8.4912243e-02 5.2060674e-02 -1.5518775e-02 -2.2123174e-02 5.8196919e-02 -9.0145365e-02 1.1570384e-01 -1.3306063e-01 1.4098540e-01 -1.3891640e-01 1.2700028e-01 -1.0608172e-01 7.7643552e-02 -4.3701602e-02 6.6618581e-03 3.0850113e-02 -6.6175269e-02 9.6809580e-02 -1.2058153e-01 1.3580604e-01 -1.4140391e-01 1.3697834e-01 -1.2284305e-01 1.0000000e-01 -7.0068437e-02 3.5170061e-02 2.2213501e-03 -3.9455301e-02 7.3892456e-02 -1.0309173e-01 1.2498333e-01 -1.3801547e-01 1.4126436e-01 -1.3449970e-01 1.1820101e-01 -9.3523621e-02 6.2216794e-02 -2.6499720e-02 -1.1095792e-02 4.7904776e-02 -8.1318023e-02 1.0896703e-01 -1.2889189e-01 1.3968022e-01 -1.4056731e-01 1.3149025e-01 -1.1309249e-01 8.6678147e-02 -5.4119610e-02 1.7724796e-02 1.9926443e-02 -5.6165193e-02 8.8422664e-02 -1.1441228e-01 1.3229176e-01 -1.4079372e-01 1.3931550e-01 -1.2796187e-01 1.0753764e-01 -7.9490594e-02 4.5808841e-02 -8.8799204e-03 -2.8678454e-02 6.4203952e-02 -9.5178342e-02 1.1940600e-01 -1.3516954e-01 1.4135157e-01 -1.3751387e-01 1.2392848e-01 -1.0155839e-01 7.1989327e-02 -3.7317285e-02 1.0000000e-01 -1.3751387e-01 1.2600735e-01 -1.0753764e-01 8.3125388e-02 -5.4119610e-02 2.2123174e-02 1.1095792e-02 -4.3701602e-02 7.3892456e-02 -1.0000000e-01 1.2058153e-01 -1.3449970e-01 1.4098540e-01 -1.3968022e-01 1.3065630e-01 -1.1441228e-01 9.1845824e-02 -6.4203952e-02 3.3014160e-02 6.9188947e-17 -3.3014160e-02 6.4203952e-02 -9.1845824e-02 1.1441228e-01 -1.3065630e-01 1.3968022e-01 -1.4098540e-01 1.3449970e-01 -1.2058153e-01 1.0000000e-01 -7.3892456e-02 4.3701602e-02 -1.1095792e-02 -2.2123174e-02 5.4119610e-02 -8.3125388e-02 1.0753764e-01 -1.2600735e-01 1.3751387e-01 -1.4142136e-01 1.3751387e-01 -1.2600735e-01 1.0753764e-01 -8.3125388e-02 5.4119610e-02 -2.2123174e-02 -1.1095792e-02 4.3701602e-02 -7.3892456e-02 1.0000000e-01 -1.2058153e-01 1.3449970e-01 -1.4098540e-01 1.3968022e-01 -1.3065630e-01 1.1441228e-01 -9.1845824e-02 6.4203952e-02 -3.3014160e-02 -2.0756684e-16 3.3014160e-02 -6.4203952e-02 9.1845824e-02 -1.1441228e-01 1.3065630e-01 -1.3968022e-01 1.4098540e-01 -1.3449970e-01 1.2058153e-01 -1.0000000e-01 7.3892456e-02 -4.3701602e-02 1.1095792e-02 2.2123174e-02 -5.4119610e-02 8.3125388e-02 -1.0753764e-01 1.2600735e-01 -1.3751387e-01 1.4142136e-01 -1.3751387e-01 1.2600735e-01 -1.0753764e-01 8.3125388e-02 -5.4119610e-02 2.2123174e-02 1.1095792e-02 -4.3701602e-02 7.3892456e-02 -1.0000000e-01 1.2058153e-01 -1.3449970e-01 1.4098540e-01 -1.3968022e-01 1.3065630e-01 -1.1441228e-01 9.1845824e-02 -6.4203952e-02 3.3014160e-02 1.0000000e-01 -1.3848302e-01 1.2979010e-01 -1.1570384e-01 9.6809580e-02 -7.3892456e-02 4.7904776e-02 -1.9926443e-02 -8.8799204e-03 3.7317285e-02 -6.4203952e-02 8.8422664e-02 -1.0896703e-01 1.2498333e-01 -1.3580604e-01 1.4098540e-01 -1.4030621e-01 1.3379667e-01 -1.2172730e-01 1.0459963e-01 -8.3125388e-02 5.8196919e-02 -3.0850113e-02 2.2213501e-03 2.6499720e-02 -5.4119610e-02 7.9490594e-02 -1.0155839e-01 1.1940600e-01 -1.3229176e-01 1.3968022e-01 -1.4126436e-01 
1.3697834e-01 -1.2700028e-01 1.1174479e-01 -9.1845824e-02 6.8130258e-02 -4.1583582e-02 1.3308925e-02 1.5518775e-02 -4.3701602e-02 7.0068437e-02 -9.3523621e-02 1.1309249e-01 -1.2796187e-01 1.3751387e-01 -1.4135157e-01 1.3931550e-01 -1.3149025e-01 1.1820101e-01 -1.0000000e-01 7.7643552e-02 -5.2060674e-02 2.4314447e-02 4.4421521e-03 -3.3014160e-02 6.0214285e-02 -8.4912243e-02 1.0608172e-01 -1.2284305e-01 1.3449970e-01 -1.4056731e-01 1.4079372e-01 -1.3516954e-01 1.2392848e-01 -1.0753764e-01 8.6678147e-02 -6.2216794e-02 3.5170061e-02 -6.6618581e-03 -2.2123174e-02 4.9988892e-02 -7.5777352e-02 9.8416932e-02 -1.1696686e-01 1.3065630e-01 -1.3891640e-01 1.4140391e-01 -1.3801547e-01 1.2889189e-01 -1.1441228e-01 9.5178342e-02 -7.1989327e-02 4.5808841e-02 -1.7724796e-02 -1.1095792e-02 3.9455301e-02 -6.6175269e-02 9.0145365e-02 -1.1036953e-01 1.2600735e-01 -1.3640902e-01 1.4114229e-01 -1.4001049e-01 1.3306063e-01 -1.2058153e-01 1.0309173e-01 -8.1318023e-02 5.6165193e-02 -2.8678454e-02 1.0000000e-01 -1.3931550e-01 1.3306063e-01 -1.2284305e-01 1.0896703e-01 -9.1845824e-02 7.1989327e-02 -4.9988892e-02 2.6499720e-02 -2.2213501e-03 -2.2123174e-02 4.5808841e-02 -6.8130258e-02 8.8422664e-02 -1.0608172e-01 1.2058153e-01 -1.3149025e-01 1.3848302e-01 -1.4135157e-01 1.4001049e-01 -1.3449970e-01 1.2498333e-01 -1.1174479e-01 9.5178342e-02 -7.5777352e-02 5.4119610e-02 -3.0850113e-02 6.6618581e-03 1.7724796e-02 -4.1583582e-02 6.4203952e-02 -8.4912243e-02 1.0309173e-01 -1.1820101e-01 1.2979010e-01 -1.3751387e-01 1.4114229e-01 -1.4056731e-01 1.3580604e-01 -1.2700028e-01 1.1441228e-01 -9.8416932e-02 7.9490594e-02 -5.8196919e-02 3.5170061e-02 -1.1095792e-02 -1.3308925e-02 3.7317285e-02 -6.0214285e-02 8.1318023e-02 -1.0000000e-01 1.1570384e-01 -1.2796187e-01 1.3640902e-01 -1.4079372e-01 1.4098540e-01 -1.3697834e-01 1.2889189e-01 -1.1696686e-01 1.0155839e-01 -8.3125388e-02 6.2216794e-02 -3.9455301e-02 1.5518775e-02 8.8799204e-03 -3.3014160e-02 5.6165193e-02 -7.7643552e-02 9.6809580e-02 -1.1309249e-01 1.2600735e-01 -1.3516954e-01 1.4030621e-01 -1.4126436e-01 1.3801547e-01 -1.3065630e-01 1.1940600e-01 -1.0459963e-01 8.6678147e-02 -6.6175269e-02 4.3701602e-02 -1.9926443e-02 -4.4421521e-03 2.8678454e-02 -5.2060674e-02 7.3892456e-02 -9.3523621e-02 1.1036953e-01 -1.2392848e-01 1.3379667e-01 -1.3968022e-01 1.4140391e-01 -1.3891640e-01 1.3229176e-01 -1.2172730e-01 1.0753764e-01 -9.0145365e-02 7.0068437e-02 -4.7904776e-02 2.4314447e-02 1.0000000e-01 -1.4001049e-01 1.3580604e-01 -1.2889189e-01 1.1940600e-01 -1.0753764e-01 9.3523621e-02 -7.7643552e-02 6.0214285e-02 -4.1583582e-02 2.2123174e-02 -2.2213501e-03 -1.7724796e-02 3.7317285e-02 -5.6165193e-02 7.3892456e-02 -9.0145365e-02 1.0459963e-01 -1.1696686e-01 1.2700028e-01 -1.3449970e-01 1.3931550e-01 -1.4135157e-01 1.4056731e-01 -1.3697834e-01 1.3065630e-01 -1.2172730e-01 1.1036953e-01 -9.6809580e-02 8.1318023e-02 -6.4203952e-02 4.5808841e-02 -2.6499720e-02 6.6618581e-03 1.3308925e-02 -3.3014160e-02 5.2060674e-02 -7.0068437e-02 8.6678147e-02 -1.0155839e-01 1.1441228e-01 -1.2498333e-01 1.3306063e-01 -1.3848302e-01 1.4114229e-01 -1.4098540e-01 1.3801547e-01 -1.3229176e-01 1.2392848e-01 -1.1309249e-01 1.0000000e-01 -8.4912243e-02 6.8130258e-02 -4.9988892e-02 3.0850113e-02 -1.1095792e-02 -8.8799204e-03 2.8678454e-02 -4.7904776e-02 6.6175269e-02 -8.3125388e-02 9.8416932e-02 -1.1174479e-01 1.2284305e-01 -1.3149025e-01 1.3751387e-01 -1.4079372e-01 1.4126436e-01 -1.3891640e-01 1.3379667e-01 -1.2600735e-01 1.1570384e-01 -1.0309173e-01 8.8422664e-02 -7.1989327e-02 5.4119610e-02 -3.5170061e-02 
1.5518775e-02 4.4421521e-03 -2.4314447e-02 4.3701602e-02 -6.2216794e-02 7.9490594e-02 -9.5178342e-02 1.0896703e-01 -1.2058153e-01 1.2979010e-01 -1.3640902e-01 1.4030621e-01 -1.4140391e-01 1.3968022e-01 -1.3516954e-01 1.2796187e-01 -1.1820101e-01 1.0608172e-01 -9.1845824e-02 7.5777352e-02 -5.8196919e-02 3.9455301e-02 -1.9926443e-02 1.0000000e-01 -1.4056731e-01 1.3801547e-01 -1.3379667e-01 1.2796187e-01 -1.2058153e-01 1.1174479e-01 -1.0155839e-01 9.0145365e-02 -7.7643552e-02 6.4203952e-02 -4.9988892e-02 3.5170061e-02 -1.9926443e-02 4.4421521e-03 1.1095792e-02 -2.6499720e-02 4.1583582e-02 -5.6165193e-02 7.0068437e-02 -8.3125388e-02 9.5178342e-02 -1.0608172e-01 1.1570384e-01 -1.2392848e-01 1.3065630e-01 -1.3580604e-01 1.3931550e-01 -1.4114229e-01 1.4126436e-01 -1.3968022e-01 1.3640902e-01 -1.3149025e-01 1.2498333e-01 -1.1696686e-01 1.0753764e-01 -9.6809580e-02 8.4912243e-02 -7.1989327e-02 5.8196919e-02 -4.3701602e-02 2.8678454e-02 -1.3308925e-02 -2.2213501e-03 1.7724796e-02 -3.3014160e-02 4.7904776e-02 -6.2216794e-02 7.5777352e-02 -8.8422664e-02 1.0000000e-01 -1.1036953e-01 1.1940600e-01 -1.2700028e-01 1.3306063e-01 -1.3751387e-01 1.4030621e-01 -1.4140391e-01 1.4079372e-01 -1.3848302e-01 1.3449970e-01 -1.2889189e-01 1.2172730e-01 -1.1309249e-01 1.0309173e-01 -9.1845824e-02 7.9490594e-02 -6.6175269e-02 5.2060674e-02 -3.7317285e-02 2.2123174e-02 -6.6618581e-03 -8.8799204e-03 2.4314447e-02 -3.9455301e-02 5.4119610e-02 -6.8130258e-02 8.1318023e-02 -9.3523621e-02 1.0459963e-01 -1.1441228e-01 1.2284305e-01 -1.2979010e-01 1.3516954e-01 -1.3891640e-01 1.4098540e-01 -1.4135157e-01 1.4001049e-01 -1.3697834e-01 1.3229176e-01 -1.2600735e-01 1.1820101e-01 -1.0896703e-01 9.8416932e-02 -8.6678147e-02 7.3892456e-02 -6.0214285e-02 4.5808841e-02 -3.0850113e-02 1.5518775e-02 1.0000000e-01 -1.4098540e-01 1.3968022e-01 -1.3751387e-01 1.3449970e-01 -1.3065630e-01 1.2600735e-01 -1.2058153e-01 1.1441228e-01 -1.0753764e-01 1.0000000e-01 -9.1845824e-02 8.3125388e-02 -7.3892456e-02 6.4203952e-02 -5.4119610e-02 4.3701602e-02 -3.3014160e-02 2.2123174e-02 -1.1095792e-02 -1.5937968e-15 1.1095792e-02 -2.2123174e-02 3.3014160e-02 -4.3701602e-02 5.4119610e-02 -6.4203952e-02 7.3892456e-02 -8.3125388e-02 9.1845824e-02 -1.0000000e-01 1.0753764e-01 -1.1441228e-01 1.2058153e-01 -1.2600735e-01 1.3065630e-01 -1.3449970e-01 1.3751387e-01 -1.3968022e-01 1.4098540e-01 -1.4142136e-01 1.4098540e-01 -1.3968022e-01 1.3751387e-01 -1.3449970e-01 1.3065630e-01 -1.2600735e-01 1.2058153e-01 -1.1441228e-01 1.0753764e-01 -1.0000000e-01 9.1845824e-02 -8.3125388e-02 7.3892456e-02 -6.4203952e-02 5.4119610e-02 -4.3701602e-02 3.3014160e-02 -2.2123174e-02 1.1095792e-02 4.7813905e-15 -1.1095792e-02 2.2123174e-02 -3.3014160e-02 4.3701602e-02 -5.4119610e-02 6.4203952e-02 -7.3892456e-02 8.3125388e-02 -9.1845824e-02 1.0000000e-01 -1.0753764e-01 1.1441228e-01 -1.2058153e-01 1.2600735e-01 -1.3065630e-01 1.3449970e-01 -1.3751387e-01 1.3968022e-01 -1.4098540e-01 1.4142136e-01 -1.4098540e-01 1.3968022e-01 -1.3751387e-01 1.3449970e-01 -1.3065630e-01 1.2600735e-01 -1.2058153e-01 1.1441228e-01 -1.0753764e-01 1.0000000e-01 -9.1845824e-02 8.3125388e-02 -7.3892456e-02 6.4203952e-02 -5.4119610e-02 4.3701602e-02 -3.3014160e-02 2.2123174e-02 -1.1095792e-02 1.0000000e-01 -1.4126436e-01 1.4079372e-01 -1.4001049e-01 1.3891640e-01 -1.3751387e-01 1.3580604e-01 -1.3379667e-01 1.3149025e-01 -1.2889189e-01 1.2600735e-01 -1.2284305e-01 1.1940600e-01 -1.1570384e-01 1.1174479e-01 -1.0753764e-01 1.0309173e-01 -9.8416932e-02 9.3523621e-02 -8.8422664e-02 8.3125388e-02 -7.7643552e-02 
7.1989327e-02 -6.6175269e-02 6.0214285e-02 -5.4119610e-02 4.7904776e-02 -4.1583582e-02 3.5170061e-02 -2.8678454e-02 2.2123174e-02 -1.5518775e-02 8.8799204e-03 -2.2213501e-03 -4.4421521e-03 1.1095792e-02 -1.7724796e-02 2.4314447e-02 -3.0850113e-02 3.7317285e-02 -4.3701602e-02 4.9988892e-02 -5.6165193e-02 6.2216794e-02 -6.8130258e-02 7.3892456e-02 -7.9490594e-02 8.4912243e-02 -9.0145365e-02 9.5178342e-02 -1.0000000e-01 1.0459963e-01 -1.0896703e-01 1.1309249e-01 -1.1696686e-01 1.2058153e-01 -1.2392848e-01 1.2700028e-01 -1.2979010e-01 1.3229176e-01 -1.3449970e-01 1.3640902e-01 -1.3801547e-01 1.3931550e-01 -1.4030621e-01 1.4098540e-01 -1.4135157e-01 1.4140391e-01 -1.4114229e-01 1.4056731e-01 -1.3968022e-01 1.3848302e-01 -1.3697834e-01 1.3516954e-01 -1.3306063e-01 1.3065630e-01 -1.2796187e-01 1.2498333e-01 -1.2172730e-01 1.1820101e-01 -1.1441228e-01 1.1036953e-01 -1.0608172e-01 1.0155839e-01 -9.6809580e-02 9.1845824e-02 -8.6678147e-02 8.1318023e-02 -7.5777352e-02 7.0068437e-02 -6.4203952e-02 5.8196919e-02 -5.2060674e-02 4.5808841e-02 -3.9455301e-02 3.3014160e-02 -2.6499720e-02 1.9926443e-02 -1.3308925e-02 6.6618581e-03 1.0000000e-01 -1.4140391e-01 1.4135157e-01 -1.4126436e-01 1.4114229e-01 -1.4098540e-01 1.4079372e-01 -1.4056731e-01 1.4030621e-01 -1.4001049e-01 1.3968022e-01 -1.3931550e-01 1.3891640e-01 -1.3848302e-01 1.3801547e-01 -1.3751387e-01 1.3697834e-01 -1.3640902e-01 1.3580604e-01 -1.3516954e-01 1.3449970e-01 -1.3379667e-01 1.3306063e-01 -1.3229176e-01 1.3149025e-01 -1.3065630e-01 1.2979010e-01 -1.2889189e-01 1.2796187e-01 -1.2700028e-01 1.2600735e-01 -1.2498333e-01 1.2392848e-01 -1.2284305e-01 1.2172730e-01 -1.2058153e-01 1.1940600e-01 -1.1820101e-01 1.1696686e-01 -1.1570384e-01 1.1441228e-01 -1.1309249e-01 1.1174479e-01 -1.1036953e-01 1.0896703e-01 -1.0753764e-01 1.0608172e-01 -1.0459963e-01 1.0309173e-01 -1.0155839e-01 1.0000000e-01 -9.8416932e-02 9.6809580e-02 -9.5178342e-02 9.3523621e-02 -9.1845824e-02 9.0145365e-02 -8.8422664e-02 8.6678147e-02 -8.4912243e-02 8.3125388e-02 -8.1318023e-02 7.9490594e-02 -7.7643552e-02 7.5777352e-02 -7.3892456e-02 7.1989327e-02 -7.0068437e-02 6.8130258e-02 -6.6175269e-02 6.4203952e-02 -6.2216794e-02 6.0214285e-02 -5.8196919e-02 5.6165193e-02 -5.4119610e-02 5.2060674e-02 -4.9988892e-02 4.7904776e-02 -4.5808841e-02 4.3701602e-02 -4.1583582e-02 3.9455301e-02 -3.7317285e-02 3.5170061e-02 -3.3014160e-02 3.0850113e-02 -2.8678454e-02 2.6499720e-02 -2.4314447e-02 2.2123174e-02 -1.9926443e-02 1.7724796e-02 -1.5518775e-02 1.3308925e-02 -1.1095792e-02 8.8799204e-03 -6.6618581e-03 4.4421521e-03 -2.2213501e-03 nipy-0.6.1/nipy/modalities/fmri/tests/dct_5.txt000066400000000000000000000006251470056100100214410ustar00rootroot00000000000000 4.4721360e-01 6.0150096e-01 5.1166727e-01 3.7174803e-01 1.9543951e-01 4.4721360e-01 3.7174803e-01 -1.9543951e-01 -6.0150096e-01 -5.1166727e-01 4.4721360e-01 3.8726732e-17 -6.3245553e-01 -1.1618020e-16 6.3245553e-01 4.4721360e-01 -3.7174803e-01 -1.9543951e-01 6.0150096e-01 -5.1166727e-01 4.4721360e-01 -6.0150096e-01 5.1166727e-01 -3.7174803e-01 1.9543951e-01 nipy-0.6.1/nipy/modalities/fmri/tests/get_td_dd.m000066400000000000000000000010601470056100100217670ustar00rootroot00000000000000% Make three basis functions from SPM HRF % Use code from spm_get_bf to get SPM HRF, temporal derivative and peak % dispersion derivative as used by SPM. 
% Use high time resolution
dt = 0.01;
[hrf, params] = spm_hrf(dt);
% Time derivative
time_d = 1;  % time offset in seconds
p = params;
p(6) = p(6) + time_d;
off_by_1 = spm_hrf(dt, p);
dhrf = (hrf - off_by_1) / time_d;
% Dispersion derivative
disp_d = 0.01;  % dispersion parameter offset
p = params;
p(3) = p(3) + disp_d;
ddhrf = (hrf - spm_hrf(dt, p)) / disp_d;
save spm_bases.mat -6 hrf dhrf ddhrf dt
nipy-0.6.1/nipy/modalities/fmri/tests/make_hrfs.m
% Use SPM spm_hrf function to create HRF time courses
% Try different dt values, and values for peak, undershoot parameters
hrfs = {};
params = {};
for dt = [0.5, 1, 1.5]
    upk = 16;
    udsp = 1;
    rat = 6;
    for ppk = [5, 6, 7]
        for pdsp = [0.5, 1, 1.5]
            params{end+1} = [dt, ppk, upk, pdsp, udsp, rat];
            hrfs{end+1} = spm_hrf(dt, [ppk, upk, pdsp, udsp, rat]);
        end
    end
    ppk = 6;
    pdsp = 1;
    for upk = [5, 6, 7]
        for udsp = [0.5, 1, 1.5]
            params{end+1} = [dt, ppk, upk, pdsp, udsp, rat];
            hrfs{end+1} = spm_hrf(dt, [ppk, upk, pdsp, udsp, rat]);
        end
    end
end
save hrfs.mat -6 params hrfs
nipy-0.6.1/nipy/modalities/fmri/tests/spm_bases.mat
MATLAB 5.0 MAT-file, written by Octave 3.6.2, 2013-09-11 13:16:00 UTC
[binary MAT-file data for variables hrf, dhrf, ddhrf and dt omitted]
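Both Octave scripts above save in MATLAB -v6 format, so the fixtures can be read back in Python with scipy.io.loadmat. A minimal sketch, not part of the archive, assuming the two .mat files have been extracted to the working directory; the variable names come from the save commands above:

import numpy as np
from scipy.io import loadmat

bases = loadmat('spm_bases.mat')
hrf, dhrf, ddhrf = (bases[k].ravel() for k in ('hrf', 'dhrf', 'ddhrf'))
dt = bases['dt'].item()
t = np.arange(hrf.size) * dt  # time grid implied by spm_hrf(dt) above
assert hrf.shape == dhrf.shape == ddhrf.shape

# 'params' and 'hrfs' were Octave cell arrays, so loadmat returns
# object arrays; each element is itself a small 2D double array.
hrfs_mat = loadmat('hrfs.mat')
for p, h in zip(hrfs_mat['params'].ravel(), hrfs_mat['hrfs'].ravel()):
    assert p.size == 6  # [dt, ppk, upk, pdsp, udsp, rat]
    assert h.ndim == 2  # column vector returned by spm_hrf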
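The spm_dmtx.npz entry below is a zip of NumPy arrays; the member names are readable in its zip directory. A rough sketch of loading it, again assuming the file has been extracted from the archive:

import numpy as np

with np.load('spm_dmtx.npz') as npz:
    arr_0 = npz['arr_0']
    arr_1 = npz['arr_1']
    cos_basis = npz['cosbf_dt_1_nt_20_hcut_0p1']
print(arr_0.shape, arr_1.shape, cos_basis.shape)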
[... binary data truncated: tail of the preceding archive member ...]
nipy-0.6.1/nipy/modalities/fmri/tests/spm_dmtx.npz
[binary NumPy .npz archive; members: arr_0.npy, arr_1.npy, cosbf_dt_1_nt_20_hcut_0p1.npy]
nipy-0.6.1/nipy/modalities/fmri/tests/spm_hrfs.mat
[binary MATLAB 5.0 MAT-file, written by Octave 3.6.2, 2013-09-10 14:52:06 UTC; variables: params, hrfs]
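A minimal sketch (illustrative, not part of the archive) of how the test
modules below read these two binary fixtures; the variable names are
hypothetical, the file and key names come from the tests themselves:

    import numpy as np
    import scipy.io as sio

    # keys: 'arr_0', 'arr_1', 'cosbf_dt_1_nt_20_hcut_0p1' (see test_dmtx.py)
    dmtx = np.load('spm_dmtx.npz')
    # keys: 'params', 'hrfs' (see test_hrf.py)
    hrfs_mat = sio.loadmat('spm_hrfs.mat', squeeze_me=True)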
[... binary data truncated; the tar header of the next member is garbled. The
recovered text below appears to be the tail of the design-helper test module
(apparently nipy/modalities/fmri/tests/test_design.py); the enclosing function
and loop context is lost ...]

        # single column design, no contrasts
        spec_0 = spec_maker((), ())
        X_0, contrasts_0 = d_maker(spec_0, t)
        exp_x_0 = tc_maker(onsets==onsets)
        assert_almost_equal(X_0, exp_x_0)
        assert_dict_almost_equal(contrasts_0, {'constant_0': 1})
        X_0, contrasts_0 = d_maker(spec_0, t, level_contrasts=True)
        assert_almost_equal(X_0, exp_x_0)
        assert_dict_almost_equal(contrasts_0,
                                 {'constant_0': 1, null_name + '_1_0': 1})
        # Event spec with single factor, but only one level
        spec_1c = spec_maker((c_fac,), ('smt',))
        X_1c, contrasts_1c = d_maker(spec_1c, t)
        assert_almost_equal(X_1c, exp_x_0)
        assert_dict_almost_equal(contrasts_1c, {'constant_0': 1})
        X_1c, contrasts_1c = d_maker(spec_1c, t, level_contrasts=True)
        assert_dict_almost_equal(contrasts_1c,
                                 {'constant_0': 1, 'smt_1_0': 1})
        # Event spec with single factor, two levels
        spec_1d = spec_maker((fac_1,), ('smt',))
        exp_x_0 = tc_maker(fac_1 == 0)
        exp_x_1 = tc_maker(fac_1 == 1)
        X_1d, contrasts_1d = d_maker(spec_1d, t)
        assert_almost_equal(X_1d, np.c_[exp_x_0, exp_x_1])
        assert_dict_almost_equal(contrasts_1d,
                                 {'constant_0': [1, 1], 'smt_0': [1, -1]})
        X_1d, contrasts_1d = d_maker(spec_1d, t, level_contrasts=True)
        assert_dict_almost_equal(contrasts_1d,
                                 {'constant_0': 1,
                                  'smt_0': [1, -1],  # main effect
                                  'smt_0_0': [1, 0],  # level 0, hrf 0
                                  'smt_1_0': [0, 1]})  # level 1, hrf 0
        # Event spec with two factors, one with two levels, another with one
        spec_2dc = spec_maker((fac_1, c_fac), ('smt', 'smte'))
        X_2dc, contrasts_2dc = d_maker(spec_2dc, t)
        assert_almost_equal(X_2dc, np.c_[exp_x_0, exp_x_1])
        assert_dict_almost_equal(contrasts_2dc,
                                 {'constant_0': [1, 1],
                                  'smt_0': [1, -1],  # main effect
                                  'smt:smte_0': [1, -1],  # interaction
                                 })
        X_2dc, contrasts_2dc = d_maker(spec_2dc, t, level_contrasts=True)
        assert_dict_almost_equal(contrasts_2dc,
                                 {'constant_0': [1, 1],
                                  'smt_0': [1, -1],  # main effect
                                  'smt:smte_0': [1, -1],  # interaction
                                  'smt_0*smte_1_0': [1, 0],  # smt 0, smte 0, hrf 0
                                  'smt_1*smte_1_0': [0, 1],  # smt 1, smte 0, hrf 0
                                 })
        # Event spec with two factors, both with two levels
        spec_2dd = spec_maker((fac_1, fac_2), ('smt', 'smte'))
        exp_x_0 = tc_maker((fac_1 == 0) & (fac_2 == 0))
        exp_x_1 = tc_maker((fac_1 == 0) & (fac_2 == 1))
        exp_x_2 = tc_maker((fac_1 == 1) & (fac_2 == 0))
        exp_x_3 = tc_maker((fac_1 == 1) & (fac_2 == 1))
        X_2dd, contrasts_2dd = d_maker(spec_2dd, t)
        assert_almost_equal(X_2dd, np.c_[exp_x_0, exp_x_1, exp_x_2, exp_x_3])
        exp_cons = {'constant_0': [1, 1, 1, 1],
                    'smt_0': [1, 1, -1, -1],  # main effect fac_1
                    'smte_0': [1, -1, 1, -1],  # main effect fac_2
                    'smt:smte_0': [1, -1, -1, 1],  # interaction
                   }
        assert_dict_almost_equal(contrasts_2dd, exp_cons)
        X_2dd, contrasts_2dd = d_maker(spec_2dd, t, level_contrasts=True)
        level_cons = exp_cons.copy()
        level_cons.update({
            'smt_0*smte_0_0': [1, 0, 0, 0],  # smt 0, smte 0, hrf 0
            'smt_0*smte_1_0': [0, 1, 0, 0],  # smt 0, smte 1, hrf 0
            'smt_1*smte_0_0': [0, 0, 1, 0],  # smt 1, smte 0, hrf 0
            'smt_1*smte_1_0': [0, 0, 0, 1],  # smt 1, smte 1, hrf 0
        })
        assert_dict_almost_equal(contrasts_2dd, level_cons)
        # Test max order >> 2, no error
        X_2dd, contrasts_2dd = d_maker(spec_2dd, t, order=100)
        assert_almost_equal(X_2dd, np.c_[exp_x_0, exp_x_1, exp_x_2, exp_x_3])
        assert_dict_almost_equal(contrasts_2dd, exp_cons)
        # Test max order = 1
        X_2dd, contrasts_2dd = d_maker(spec_2dd, t, order=1)
        assert_almost_equal(X_2dd, np.c_[exp_x_0, exp_x_1, exp_x_2, exp_x_3])
        # No interaction
        assert_dict_almost_equal(contrasts_2dd,
                                 {'constant_0': [1, 1, 1, 1],
                                  'smt_0': [1, 1, -1, -1],  # main effect fac_1
                                  'smte_0': [1, -1, 1, -1],  # main effect fac_2
                                 })
    # events : test field called "time" is necessary
    spec_1d = make_recarray(zip(onsets, fac_1), ('brighteyes', 'smt'))
    pytest.raises(ValueError, event_design, spec_1d, t)
    # blocks : test fields called "start" and "end" are necessary
    spec_1d = make_recarray(zip(onsets, offsets, fac_1),
                            ('mister', 'brighteyes', 'smt'))
    pytest.raises(ValueError, block_design, spec_1d, t)
    spec_1d = make_recarray(zip(onsets, offsets, fac_1),
                            ('start', 'brighteyes', 'smt'))
    pytest.raises(ValueError, block_design, spec_1d, t)
    spec_1d = make_recarray(zip(onsets, offsets, fac_1),
                            ('mister', 'end', 'smt'))
    pytest.raises(ValueError, block_design, spec_1d, t)

def assert_des_con_equal(one, two):
    des1, con1 = one
    des2, con2 = two
    assert_array_equal(des1, des2)
    assert set(con1) == set(con2)
    for key in con1:
        assert_array_equal(con1[key], con2[key])

def test_stack_designs():
    # Test stack_designs function
    N = 10
    X1 = np.ones((N, 1))
    con1 = {'con1': np.array([1])}
    X2 = np.eye(N)
    con2 = {'con2': np.array([1] + [0] * (N - 1))}
    sX, sc = stack_designs((X1, con1), (X2, con2))
    X1_X2 = np.c_[X1, X2]
    exp = (X1_X2,
           {'con1': [1] + [0] * N,
            'con2': [0, 1] + [0] * (N - 1)})
    assert_des_con_equal((sX, sc), exp)
    # Result same when stacking just two designs
    sX, sc = stack2designs(X1, X2, {}, con2)
    # Stacking a design with empty design is OK
    assert_des_con_equal(stack2designs([], X2, con1, con2), (X2, con2))
    assert_des_con_equal(stack_designs(([], con1), (X2, con2)), (X2, con2))
    assert_des_con_equal(stack2designs(X1, [], con1, con2), (X1, con1))
    assert_des_con_equal(stack_designs((X1, con1), ([], con2)), (X1, con1))
    # Stacking one design returns output unmodified
    assert_des_con_equal(stack_designs((X1, con1)), (X1, con1))
    # Can stack design without contrasts
    assert_des_con_equal(stack_designs(X1, X2), (X1_X2, {}))
    assert_des_con_equal(stack_designs(X1, (X2, con2)),
                         (X1_X2, {'con2': [0, 1] + [0] * (N - 1)}))
    assert_des_con_equal(stack_designs((X1, con1), X2),
                         (X1_X2, {'con1': [1] + [0] * N}))
    # No-contrasts can also be 1-length tuple
    assert_des_con_equal(stack_designs((X1,), (X2, con2)),
                         (X1_X2, {'con2': [0, 1] + [0] * (N - 1)}))
    assert_des_con_equal(stack_designs((X1, con1), (X2,)),
                         (X1_X2, {'con1': [1] + [0] * N}))
    # Stack three
    X3 = np.arange(N)[:, None]
    con3 = {'con3': np.array([1])}
    assert_des_con_equal(
        stack_designs((X1, con1), (X2, con2), (X3, con3)),
        (np.c_[X1, X2, X3],
         {'con1': [1, 0] + [0] * N,
          'con2': [0, 1] + [0] * N,
          'con3': [0] * N + [0, 1]}))

def test_openfmri2nipy():
    # Test loading / processing OpenFMRI stimulus file
    stim_file = pjoin(THIS_DIR, 'cond_test1.txt')
    ons_dur_amp = np.loadtxt(stim_file)
    onsets, durations, amplitudes = ons_dur_amp.T
    for in_param in (stim_file, ons_dur_amp):
        res = openfmri2nipy(in_param)
        assert res.dtype.names == ('start', 'end', 'amplitude')
        assert_array_equal(res['start'], onsets)
        assert_array_equal(res['end'], onsets + durations)
        assert_array_equal(res['amplitude'], amplitudes)

def test_block_amplitudes():
    # Test event design helper function
    # An event design with one event type
    onsets = np.array([0, 20, 40, 60])
    durations = np.array([2, 3, 4, 5])
    offsets = onsets + durations
    amplitudes = [3, 2, 1, 4]
    t = np.arange(0, 100, 2.5)

    def mk_blk_tc(amplitudes=None, hrf=glover):
        func_amp = blocks(zip(onsets, offsets), amplitudes)
        # Make real time course for block onset / offsets / amplitudes
        term = convolve_functions(func_amp, hrf(T),
                                  (-5, 70),  # step func support
                                  (0, 30.),  # conv kernel support
                                  0.02)  # dt
        return lambdify_t(term)(t)

    no_amps = make_recarray(zip(onsets, offsets), ('start', 'end'))
    amps = make_recarray(zip(onsets, offsets, amplitudes),
                         ('start', 'end', 'amplitude'))
    X, contrasts = block_amplitudes('ev0', no_amps, t)
    assert_almost_equal(X, mk_blk_tc())
    assert_dict_almost_equal(contrasts, {'ev0_0': 1})
    # Same thing as 2D array
    X, contrasts = block_amplitudes('ev0', np.c_[onsets, offsets], t)
    assert_almost_equal(X, mk_blk_tc())
    assert_dict_almost_equal(contrasts, {'ev0_0': 1})
    # Now as list
    X, contrasts = block_amplitudes('ev0', list(zip(onsets, offsets)), t)
    assert_almost_equal(X, mk_blk_tc())
    assert_dict_almost_equal(contrasts, {'ev0_0': 1})
    # Add amplitudes
    X_a, contrasts_a = block_amplitudes('ev1', amps, t)
    assert_almost_equal(X_a, mk_blk_tc(amplitudes=amplitudes))
    assert_dict_almost_equal(contrasts_a, {'ev1_0': 1})
    # Same thing as 2D array
    X_a, contrasts_a = block_amplitudes('ev1', np.c_[onsets, offsets, amplitudes], t)
    assert_almost_equal(X_a, mk_blk_tc(amplitudes=amplitudes))
    assert_dict_almost_equal(contrasts_a, {'ev1_0': 1})
    # Add another HRF
    X_2, contrasts_2 = block_amplitudes('ev0', no_amps, t, (glover, dglover))
    assert_almost_equal(X_2, np.c_[mk_blk_tc(), mk_blk_tc(hrf=dglover)])
    assert_dict_almost_equal(contrasts_2, {'ev0_0': [1, 0], 'ev0_1': [0, 1]})
    # Errors on bad input
    no_start = make_recarray(zip(onsets, offsets), ('begin', 'end'))
    pytest.raises(ValueError, block_amplitudes, 'ev0', no_start, t)
    no_end = make_recarray(zip(onsets, offsets), ('start', 'finish'))
    pytest.raises(ValueError, block_amplitudes, 'ev0', no_end, t)
    funny_amp = make_recarray(zip(onsets, offsets, amplitudes),
                              ('start', 'end', 'intensity'))
    pytest.raises(ValueError, block_amplitudes, 'ev0', funny_amp, t)
    funny_extra = make_recarray(zip(onsets, offsets, amplitudes, onsets),
                                ('start', 'end', 'amplitude', 'extra_field'))
    pytest.raises(ValueError, block_amplitudes, 'ev0', funny_extra, t)
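A minimal sketch (illustrative, not from the original module) of the
field-name convention that the ValueError tests above enforce; the onset and
factor values here are hypothetical:

    # event_design requires a 'time' field; block_design requires 'start'/'end'
    ev_spec = make_recarray(zip([0, 20, 40], [0, 1, 0]), ('time', 'smt'))
    X_ev, cons_ev = event_design(ev_spec, t)
    blk_spec = make_recarray(zip([0, 20], [10, 30], [0, 1]),
                             ('start', 'end', 'smt'))
    X_blk, cons_blk = block_design(blk_spec, t)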
nipy-0.6.1/nipy/modalities/fmri/tests/test_dmtx.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the design_matrix utilities.

Note that the tests just check that the produced data has the correct
dimensions, not that it is exact.
"""
import os.path as osp
from unittest import skipIf

import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal

from ..design_matrix import (
    _convolve_regressors,
    _cosine_drift,
    dmtx_from_csv,
    dmtx_light,
    make_dmtx,
)

#from os.path import join, dirname, walk
from ..experimental_paradigm import BlockParadigm, EventRelatedParadigm

try:
    import matplotlib.pyplot
except ImportError:
    have_mpl = False
else:
    have_mpl = True

# load the spm file to test cosine basis
my_path = osp.dirname(osp.abspath(__file__))
full_path_dmtx_file = osp.join(my_path, 'spm_dmtx.npz')
DMTX = np.load(full_path_dmtx_file)

def basic_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    paradigm = EventRelatedParadigm(conditions, onsets)
    return paradigm

def modulated_block_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    duration = 5 + 5 * np.random.rand(len(onsets))
    values = 1 + np.random.rand(len(onsets))
    paradigm = BlockParadigm(conditions, onsets, duration, values)
    return paradigm

def modulated_event_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    values = 1 + np.random.rand(len(onsets))
    paradigm = EventRelatedParadigm(conditions, onsets, values)
    return paradigm

def block_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    duration = 5 * np.ones(9)
    paradigm = BlockParadigm(conditions, onsets, duration)
    return paradigm

@skipIf(not have_mpl, reason="Needs matplotlib")
def test_show_dmtx():
    # test that the show code indeed (formally) runs
    frametimes = np.linspace(0, 127 * 1., 128)
    DM = make_dmtx(frametimes, drift_model='polynomial', drift_order=3)
    ax = DM.show()
    assert (ax is not None)
    # test the colormap
    ax = DM.show(cmap=matplotlib.pyplot.cm.gray)
    assert (ax is not None)

def test_cosine_drift():
    # Compare against a basis computed with SPM, stored in spm_dmtx.npz so
    # that the test also works when launched from a different directory
    # (originally read with np.loadtxt('dctmtx_N_20_order_4.txt'))
    spm_drifts = DMTX['cosbf_dt_1_nt_20_hcut_0p1']
    tim = np.arange(20)
    P = 10  # period is half the time, gives us an order 4
    nipy_drifts = _cosine_drift(P, tim)
    assert_almost_equal(spm_drifts[:, 1:], nipy_drifts[:, :-1])
    # nipy_drifts is placing the constant at the end [:, :-1]

@skipIf(not have_mpl, reason="Needs matplotlib")
def test_show_contrast():
    # test that the show code indeed (formally) runs
    frametimes = np.linspace(0, 127 * 1., 128)
    DM = make_dmtx(frametimes, drift_model='polynomial', drift_order=3)
    contrast = np.random.standard_normal((3, DM.matrix.shape[1]))
    ax = DM.show_contrast(contrast)
    assert (ax is not None)
    # test the colormap
    ax = DM.show_contrast(contrast, cmap=matplotlib.pyplot.cm.gray)
    assert (ax is not None)

def test_dmtx0():
    # Test design matrix creation when no paradigm is provided
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    X, names = dmtx_light(frametimes, drift_model='polynomial', drift_order=3)
    assert len(names) == 4

def test_dmtx0b():
    # Test design matrix creation when no paradigm is provided
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    X, names = dmtx_light(frametimes, drift_model='polynomial', drift_order=3)
    assert_almost_equal(X[:, 0], np.linspace(- 0.5, .5, 128))

def test_dmtx0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    X, names = dmtx_light(frametimes, drift_model='polynomial', drift_order=3, add_regs=ax)
    assert_almost_equal(X[:, 0], ax[:, 0])

def test_dmtx0d():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    X, names = dmtx_light(frametimes, drift_model='polynomial', drift_order=3, add_regs=ax)
    assert len(names) == 8
    assert X.shape[1] == 8

def test_dmtx1():
    # basic test based on basic_paradigm and canonical hrf
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 7

def test_convolve_regressors():
    # tests for convolve_regressors helper function
    conditions = ['c0', 'c1']
    onsets = [20, 40]
    paradigm = EventRelatedParadigm(conditions, onsets)
    # names not passed -> default names
    frametimes = np.arange(100)
    f, names = _convolve_regressors(paradigm, 'canonical', frametimes)
    assert names == ['c0', 'c1']

def test_dmtx1b():
    # idem test_dmtx1, but different test
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert X.shape == (128, 7)

def test_dmtx1c():
    # idem test_dmtx1, but different test
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert (X[:, - 1] == 1).all()

def test_dmtx1d():
    # idem test_dmtx1, but different test
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert (np.isnan(X) == 0).all()

def test_dmtx2():
    # idem test_dmtx1 with a different drift term
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='cosine', hfcut=63)
    assert len(names) == 7  # was 8 with old cosine

def test_dmtx3():
    # idem test_dmtx1 with a different drift term
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='blank')
    assert len(names) == 4

def test_dmtx4():
    # idem test_dmtx1 with a different hrf model
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical With Derivative'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 10

def test_dmtx5():
    # idem test_dmtx1 with a block paradigm
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = block_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 7

def test_dmtx6():
    # idem test_dmtx1 with a block paradigm and the hrf derivative
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = block_paradigm()
    hrf_model = 'Canonical With Derivative'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 10

def test_dmtx7():
    # idem test_dmtx1, but odd paradigm
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    conditions = [0, 0, 0, 1, 1, 1, 3, 3, 3]  # no condition 'c2'
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    paradigm = EventRelatedParadigm(conditions, onsets)
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 7

def test_dmtx8():
    # basic test based on basic_paradigm and FIR
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    assert len(names) == 7

def test_dmtx9():
    # basic test based on basic_paradigm and FIR
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    assert len(names) == 16

def test_dmtx10():
    # Check that the first column of the FIR design matrix is OK
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_)
    assert np.all(X[onset + 1, 0] == 1)

def test_dmtx11():
    # check that the second column of the FIR design matrix is OK indeed
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_)
    assert np.all(X[onset + 3, 2] == 1)

def test_dmtx12():
    # check that the 11th column of a FIR design matrix is indeed OK
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    onset = paradigm.onset[paradigm.con_id == 'c2'].astype(np.int_)
    assert np.all(X[onset + 4, 11] == 1)

def test_dmtx13():
    # Check that the fir_duration is well taken into account
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_)
    assert np.all(X[onset + 1, 0] == 1)

def test_dmtx14():
    # Check that the first column of the FIR design matrix is OK after a 1/2
    # time shift
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128) + tr / 2
    paradigm = basic_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_)
    assert np.all(X[onset + 1, 0] > .9)

def test_dmtx15():
    # basic test based on basic_paradigm, plus user supplied regressors
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    ax = np.random.randn(128, 4)
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, add_regs=ax)
    assert len(names) == 11
    assert X.shape[1] == 11

def test_dmtx16():
    # Check that additional regressors are put at the right place
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = basic_paradigm()
    hrf_model = 'Canonical'
    ax = np.random.randn(128, 4)
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, add_regs=ax)
    assert_almost_equal(X[:, 3: 7], ax)

def test_dmtx17():
    # Test the effect of scaling on the events
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_) + 1
    assert (X[ct, 0] > 0).all()

def test_dmtx18():
    # Test the effect of scaling on the blocks
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_block_paradigm()
    hrf_model = 'Canonical'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3)
    ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_) + 3
    assert (X[ct, 0] > 0).all()

def test_dmtx19():
    # Test the effect of scaling on a FIR model
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    hrf_model = 'FIR'
    X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=list(range(1, 5)))
    idx = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int_)
    assert_array_equal(X[idx + 1, 0], X[idx + 2, 1])

def test_dmtx20():
    # Test for commit 10662f7
    frametimes = np.arange(0, 128)  # was 127 in old version of _cosine_drift
    paradigm = modulated_event_paradigm()
    X, names = dmtx_light(frametimes, paradigm, hrf_model='canonical', drift_model='cosine')
    # check that the drifts are not constant
    assert np.all(np.diff(X[:, -2]) != 0)

def test_fir_block():
    # test FIR models on block designs
    bp = block_paradigm()
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    X, names = dmtx_light(frametimes, bp, hrf_model='fir', drift_model='blank', fir_delays=list(range(4)))
    idx = bp.onset[bp.con_id == 'c1'].astype(np.int_)
    assert X.shape == (128, 13)
    assert (X[idx, 4] == 1).all()
    assert (X[idx + 1, 5] == 1).all()
    assert (X[idx + 2, 6] == 1).all()
    assert (X[idx + 3, 7] == 1).all()
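# Illustrative sketch (not a test, so not collected by pytest): the call
# pattern the tests above repeat -- frame times, a paradigm, an HRF model and
# a drift model in; design matrix plus column names out.
def _sketch_dmtx_light_usage():
    frametimes = np.linspace(0, 127, 128)
    X, names = dmtx_light(frametimes, basic_paradigm(), hrf_model='Canonical',
                          drift_model='polynomial', drift_order=3)
    return X, names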
def test_csv_io(in_tmp_path):
    # test the csv io on design matrices
    tr = 1.0
    frametimes = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    DM = make_dmtx(frametimes, paradigm, hrf_model='Canonical', drift_model='polynomial', drift_order=3)
    path = 'dmtx.csv'
    DM.write_csv(path)
    DM2 = dmtx_from_csv(path)
    assert_almost_equal(DM.matrix, DM2.matrix)
    assert DM.names == DM2.names

def test_spm_1():
    # Check that the nipy design matrix is close enough to the SPM one
    # (it cannot be identical, because the hrf shape is different)
    frametimes = np.linspace(0, 99, 100)
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    paradigm = EventRelatedParadigm(conditions, onsets)
    X1 = make_dmtx(frametimes, paradigm, drift_model='blank')
    spm_dmtx = DMTX['arr_0']
    assert (((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1)

def test_spm_2():
    # Check that the nipy design matrix is close enough to the SPM one
    # (it cannot be identical, because the hrf shape is different)
    frametimes = np.linspace(0, 99, 100)
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    duration = 10 * np.ones(9)
    paradigm = BlockParadigm(conditions, onsets, duration)
    X1 = make_dmtx(frametimes, paradigm, drift_model='blank')
    spm_dmtx = DMTX['arr_1']
    assert (((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1)

def test_frametimes_as_a_list():
    # design matrix should work with frametimes provided as a list
    paradigm = basic_paradigm()
    frametimes = list(range(99))
    X1 = make_dmtx(frametimes, paradigm, drift_model='blank')
    frametimes = np.arange(0, 99)
    X2 = make_dmtx(frametimes, paradigm, drift_model='blank')
    assert_array_equal(X1.matrix, X2.matrix)
nipy-0.6.1/nipy/modalities/fmri/tests/test_fmri.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import gc

import numpy as np
import pytest

from nipy.core.api import AffineTransform as AfT
from nipy.core.api import Image, parcels
from nipy.io.api import load_image, save_image
from nipy.modalities.fmri.api import FmriImageList, axis0_generator
from nipy.testing import funcfile

@pytest.mark.filterwarnings("ignore:"
                            "Default `strict` currently False:"
                            "FutureWarning")
def test_write(in_tmp_path):
    fname = 'myfile.nii'
    img = load_image(funcfile)
    save_image(img, fname)
    test = FmriImageList.from_image(load_image(fname))
    assert test[0].affine.shape == (4,4)
    assert img[0].affine.shape == (5,4)
    # Check the affine...
    A = np.identity(4)
    A[:3,:3] = img[:,:,:,0].affine[:3,:3]
    A[:3,-1] = img[:,:,:,0].affine[:3,-1]
    assert np.allclose(test[0].affine, A)
    del test

def test_iter():
    img = load_image(funcfile)
    img_shape = img.shape
    exp_shape = (img_shape[0],) + img_shape[2:]
    j = 0
    for i, d in axis0_generator(img.get_fdata()):
        j += 1
        assert d.shape == exp_shape
        del(i); gc.collect()
    assert j == img_shape[1]

def test_subcoordmap():
    img = load_image(funcfile)
    subcoordmap = img[3].coordmap
    xform = img.affine[:,1:]
    assert np.allclose(subcoordmap.affine[1:], xform[1:])
    assert np.allclose(subcoordmap.affine[0], [0,0,0,img.coordmap([3,0,0,0])[0]])

def test_labels1():
    img = load_image(funcfile)
    data = img.get_fdata()
    parcelmap = Image(img[0].get_fdata(), AfT('kji', 'zyx', np.eye(4)))
    parcelmap = (parcelmap.get_fdata() * 100).astype(np.int32)
    v = 0
    for i, d in axis0_generator(data, parcels(parcelmap)):
        v += d.shape[1]
    assert v == parcelmap.size
nipy-0.6.1/nipy/modalities/fmri/tests/test_glm.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the glm utilities.
"""
import numpy as np
import pytest
from nibabel import Nifti1Image, load, save
from numpy.testing import (
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
)

from nipy.io.nibcompat import get_affine
from nipy.testing import funcfile
from nipy.testing.decorators import if_example_data

from ..glm import FMRILinearModel, GeneralLinearModel, data_scaling

def write_fake_fmri_data(shapes, rk=3, affine=np.eye(4)):
    mask_file, fmri_files, design_files = 'mask.nii', [], []
    for i, shape in enumerate(shapes):
        fmri_files.append('fmri_run%d.nii' %i)
        data = 100 + np.random.randn(*shape)
        data[0] -= 10
        save(Nifti1Image(data, affine), fmri_files[-1])
        design_files.append('dmtx_%d.npz' %i)
        np.savez(design_files[-1], np.random.randn(shape[3], rk))
    save(Nifti1Image((np.random.rand(*shape[:3]) > .5).astype(np.int8), affine),
         mask_file)
    return mask_file, fmri_files, design_files

def generate_fake_fmri_data(shapes, rk=3, affine=np.eye(4)):
    fmri_data = []
    design_matrices = []
    for shape in shapes:
        data = 100 + np.random.randn(*shape)
        data[0] -= 10
        fmri_data.append(Nifti1Image(data, affine))
        design_matrices.append(np.random.randn(shape[3], rk))
    mask = Nifti1Image((np.random.rand(*shape[:3]) > .5).astype(np.int8), affine)
    return mask, fmri_data, design_matrices
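# Illustrative sketch (not a test, so not collected by pytest): the high-level
# multi-session pattern exercised below -- one design per run, fit, then one
# contrast per run to get a z-score image.
def _sketch_fmri_linear_model_usage():
    shapes, rk = ((5, 6, 4, 20), (5, 6, 4, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)
    model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    model.fit()
    z_image, = model.contrast([np.eye(rk)[1]] * 2)
    return z_image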
def test_high_level_glm_with_paths(in_tmp_path):
    shapes, rk = ((5, 6, 4, 20), (5, 6, 4, 19)), 3
    mask_file, fmri_files, design_files = write_fake_fmri_data(shapes, rk)
    multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_array_equal(get_affine(z_image), get_affine(load(mask_file)))
    assert z_image.get_fdata().std() < 3.
    # Delete objects attached to files to avoid WindowsError when deleting
    # temporary directory
    del z_image, fmri_files, multi_session_model

def test_high_level_glm_with_data(in_tmp_path):
    shapes, rk = ((7, 6, 5, 20), (7, 6, 5, 19)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)
    # without mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert np.sum(z_image.get_fdata() == 0) == 0
    # compute the mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices,
                                          m=0, M=.01, threshold=0.)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert z_image.get_fdata().std() < 3.
    # with mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask)
    multi_session_model.fit()
    z_image, effect_image, variance_image = multi_session_model.contrast(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    assert_array_equal(z_image.get_fdata() == 0., load(mask).get_fdata() == 0.)
    assert (variance_image.get_fdata()[load(mask).get_fdata() > 0, 0] > .001).all()
    # without scaling
    multi_session_model.fit(do_scaling=False)
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert z_image.get_fdata().std() < 3.

def test_high_level_glm_contrasts(in_tmp_path):
    shapes, rk = ((5, 6, 7, 20), (5, 6, 7, 19)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[:2]] * 2,
                                            contrast_type='tmin-conjunction')
    z1, = multi_session_model.contrast([np.eye(rk)[:1]] * 2)
    z2, = multi_session_model.contrast([np.eye(rk)[1:2]] * 2)
    assert (z_image.get_fdata() < np.maximum(z1.get_fdata(), z2.get_fdata())).all()

def test_high_level_glm_null_contrasts():
    shapes, rk = ((5, 6, 7, 20), (5, 6, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    multi_session_model.fit()
    single_session_model = FMRILinearModel(fmri_data[:1], design_matrices[:1],
                                           mask=None)
    single_session_model.fit()
    z1, = multi_session_model.contrast([np.eye(rk)[:1], np.zeros((1, rk))])
    z2, = single_session_model.contrast([np.eye(rk)[:1]])
    np.testing.assert_almost_equal(z1.get_fdata(), z2.get_fdata())

def ols_glm(n=100, p=80, q=10):
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    glm = GeneralLinearModel(X)
    glm.fit(Y, 'ols')
    return glm, n, p, q

def ar1_glm(n=100, p=80, q=10):
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    glm = GeneralLinearModel(X)
    glm.fit(Y, 'ar1')
    return glm, n, p, q

def test_glm_ols():
    mulm, n, p, q = ols_glm()
    assert_array_equal(mulm.labels_, np.zeros(n))
    assert list(mulm.results_) == [0.0]
    assert mulm.results_[0.0].theta.shape == (q, n)
    assert_almost_equal(mulm.results_[0.0].theta.mean(), 0, 1)
    assert_almost_equal(mulm.results_[0.0].theta.var(), 1. / p, 1)

def test_glm_beta():
    mulm, n, p, q = ols_glm()
    assert mulm.get_beta().shape == (q, n)
    assert mulm.get_beta([0, -1]).shape == (2, n)
    assert mulm.get_beta(6).shape == (1, n)

def test_glm_mse():
    mulm, n, p, q = ols_glm()
    mse = mulm.get_mse()
    assert_array_almost_equal(mse, np.ones(n), 0)

def test_glm_logL():
    mulm, n, p, q = ols_glm()
    logL = mulm.get_logL()
    assert_array_almost_equal(logL / n, - p * 1.41 * np.ones(n) / n, 0)

def test_glm_ar():
    mulm, n, p, q = ar1_glm()
    assert len(mulm.labels_) == n
    assert len(mulm.results_) > 1
    tmp = sum(mulm.results_[key].theta.shape[1] for key in mulm.results_)
    assert tmp == n

def test_Tcontrast():
    mulm, n, p, q = ar1_glm()
    cval = np.hstack((1, np.ones(9)))
    z_vals = mulm.contrast(cval).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_Fcontrast_1d():
    mulm, n, p, q = ar1_glm()
    cval = np.hstack((1, np.ones(9)))
    con = mulm.contrast(cval, contrast_type='F')
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_Fcontrast_nd():
    mulm, n, p, q = ar1_glm()
    cval = np.eye(q)[:3]
    con = mulm.contrast(cval)
    assert con.contrast_type == 'F'
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_Fcontrast_1d_old():
    mulm, n, p, q = ols_glm()
    cval = np.hstack((1, np.ones(9)))
    con = mulm.contrast(cval, contrast_type='F')
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_Fcontrast_nd_ols():
    mulm, n, p, q = ols_glm()
    cval = np.eye(q)[:3]
    con = mulm.contrast(cval)
    assert con.contrast_type == 'F'
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_t_contrast_add():
    mulm, n, p, q = ols_glm()
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = mulm.contrast(c1) + mulm.contrast(c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

def test_F_contrast_add():
    mulm, n, p, q = ar1_glm()
    # first test with independent contrasts
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]
    con = mulm.contrast(c1) + mulm.contrast(c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
    # then test with dependent contrasts
    con1 = mulm.contrast(c1)
    con2 = mulm.contrast(c1) + mulm.contrast(c1)
    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())

def test_t_contrast_mul():
    mulm, n, p, q = ar1_glm()
    con1 = mulm.contrast(np.eye(q)[0])
    con2 = con1 * 2
    assert_almost_equal(con1.z_score(), con2.z_score())
    assert_almost_equal(con1.effect * 2, con2.effect)

def test_F_contrast_mul():
    mulm, n, p, q = ar1_glm()
    con1 = mulm.contrast(np.eye(q)[:4])
    con2 = con1 * 2
    assert_almost_equal(con1.z_score(), con2.z_score())
    assert_almost_equal(con1.effect * 2, con2.effect)

def test_t_contrast_values():
    mulm, n, p, q = ar1_glm(n=1)
    cval = np.eye(q)[0]
    con = mulm.contrast(cval)
    t_ref = list(mulm.results_.values()).pop().Tcontrast(cval).t
    assert_almost_equal(np.ravel(con.stat()), t_ref)

def test_F_contrast_values():
    mulm, n, p, q = ar1_glm(n=1)
    cval = np.eye(q)[:3]
    con = mulm.contrast(cval)
    F_ref = list(mulm.results_.values()).pop().Fcontrast(cval).F
    # Note that the values are not strictly equal,
    # this seems to be related to a bug in Mahalanobis
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)
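# Illustrative sketch (not a test, so not collected by pytest): voxel-wise GLM
# fitting and contrast testing as used throughout the tests above.
def _sketch_general_linear_model_usage():
    X, Y = np.random.randn(80, 10), np.random.randn(80, 100)
    glm = GeneralLinearModel(X)
    glm.fit(Y, 'ar1')  # or 'ols'
    con = glm.contrast(np.eye(10)[0])  # t contrast on the first regressor
    return con.z_score()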
def test_tmin():
    mulm, n, p, q = ar1_glm(n=1)
    c1, c2, c3 = np.eye(q)[0], np.eye(q)[1], np.eye(q)[2]
    t1, t2, t3 = mulm.contrast(c1).stat(), mulm.contrast(c2).stat(), \
        mulm.contrast(c3).stat()
    tmin = min(t1, t2, t3)
    con = mulm.contrast(np.eye(q)[:3], 'tmin-conjunction')
    assert con.stat() == tmin

def test_scaling():
    """Test the scaling function"""
    shape = (400, 10)
    u = np.random.randn(*shape)
    mean = 100 * np.random.rand(shape[1])
    Y = u + mean
    Y, mean_ = data_scaling(Y)
    assert_almost_equal(Y.mean(0), 0)
    assert_almost_equal(mean_, mean, 0)
    assert Y.std() > 1

def test_fmri_inputs(in_tmp_path):
    # Test processing of FMRI inputs
    func_img = load(funcfile)
    T = func_img.shape[-1]
    des = np.ones((T, 1))
    des_fname = 'design.npz'
    np.savez(des_fname, des)
    for fi in func_img, funcfile:
        for d in des, des_fname:
            fmodel = FMRILinearModel(fi, d, mask='compute')
            fmodel = FMRILinearModel([fi], d, mask=None)
            fmodel = FMRILinearModel(fi, [d], mask=None)
            fmodel = FMRILinearModel([fi], [d], mask=None)
            fmodel = FMRILinearModel([fi, fi], [d, d], mask=None)
            fmodel = FMRILinearModel((fi, fi), (d, d), mask=None)
            pytest.raises(ValueError, FMRILinearModel, [fi, fi], d, mask=None)
            pytest.raises(ValueError, FMRILinearModel, fi, [d, d], mask=None)

@if_example_data
def test_fmri_example():
    # Run FMRI analysis using example data
    from nipy.utils import example_data
    fmri_files = [example_data.get_filename('fiac', 'fiac0', run)
                  for run in ['run1.nii.gz', 'run2.nii.gz']]
    design_files = [example_data.get_filename('fiac', 'fiac0', run)
                    for run in ['run1_design.npz', 'run2_design.npz']]
    mask = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz')
    multi_session_model = FMRILinearModel(fmri_files, design_files, mask)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(13)[1]] * 2)
    # Check number of voxels with p < 0.001
    assert np.sum(z_image.get_fdata() > 3.09) == 671
nipy-0.6.1/nipy/modalities/fmri/tests/test_hemodynamic_models.py
import warnings

import numpy as np
import pytest
from numpy.testing import (
    assert_almost_equal,
    assert_array_equal,
)

from ..hemodynamic_models import (
    _hrf_kernel,
    _orthogonalize,
    _regressor_names,
    _resample_regressor,
    _sample_condition,
    compute_regressor,
    glover_hrf,
    glover_time_derivative,
    spm_dispersion_derivative,
    spm_hrf,
    spm_time_derivative,
)

def test_spm_hrf():
    """ test that the spm_hrf is correctly normalized and has correct length """
    h = spm_hrf(2.0)
    assert_almost_equal(h.sum(), 1)
    assert len(h) == 256

def test_spm_hrf_derivative():
    """ test that the spm_hrf derivatives sum to zero and have correct length """
    h = spm_time_derivative(2.0)
    assert_almost_equal(h.sum(), 0)
    assert len(h) == 256
    h = spm_dispersion_derivative(2.0)
    assert_almost_equal(h.sum(), 0)
    assert len(h) == 256

def test_glover_hrf():
    """ test that the glover_hrf is correctly normalized and has correct length """
    h = glover_hrf(2.0)
    assert_almost_equal(h.sum(), 1)
    assert len(h) == 256

def test_glover_time_derivative():
    """ test that the glover time derivative sums to zero and has correct length """
    h = glover_time_derivative(2.0)
    assert_almost_equal(h.sum(), 0)
    assert len(h) == 256

def test_resample_regressor():
    """ test regressor resampling on a linear function """
    x = np.linspace(0, 1, 200)
    y = np.linspace(0, 1, 30)
    z = _resample_regressor(x, x, y)
    assert_almost_equal(z, y)

def test_resample_regressor_nl():
    """ test regressor resampling on a sine function """
    x = np.linspace(0, 10, 1000)
    y = np.linspace(0, 10, 30)
    z = _resample_regressor(np.cos(x), x, y)
    assert_almost_equal(z, np.cos(y), decimal=2)

def test_orthogonalize():
    """ test that the orthogonalization is OK """
    X = np.random.randn(100, 5)
    X = _orthogonalize(X)
    K = np.dot(X.T, X)
    K -= np.diag(np.diag(K))
    assert_almost_equal((K ** 2).sum(), 0, 15)

def test_orthogonalize_trivial():
    """ test that the orthogonalization is OK """
    X = np.random.randn(100)
    Y = X.copy()
    X = _orthogonalize(X)
    assert_array_equal(Y, X)

def test_sample_condition_1():
    """ Test that the experimental condition is correctly sampled """
    condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])
    frametimes = np.linspace(0, 49, 50)
    reg, rf = _sample_condition(condition, frametimes, oversampling=1,
                                min_onset=0)
    assert reg.sum() == 3
    assert reg[1] == 1
    assert reg[20] == 1
    assert reg[37] == 1
    reg, rf = _sample_condition(condition, frametimes, oversampling=1)
    assert reg.sum() == 3
    assert reg[25] == 1
    assert reg[44] == 1
    assert reg[61] == 1

def test_sample_condition_2():
    """ Test the experimental condition sampling -- onset = 0 """
    condition = ([0, 20, 36.5], [2, 2, 2], [1, 1, 1])
    frametimes = np.linspace(0, 49, 50)
    reg, rf = _sample_condition(condition, frametimes, oversampling=1,
                                min_onset=- 10)
    assert reg.sum() == 6
    assert reg[10] == 1
    assert reg[48] == 1
    assert reg[31] == 1

def test_sample_condition_3():
    """ Test the experimental condition sampling -- oversampling=10 """
    condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1])
    frametimes = np.linspace(0, 49, 50)
    reg, rf = _sample_condition(condition, frametimes, oversampling=10,
                                min_onset=0)
    assert_almost_equal(reg.sum(), 60.)
    assert reg[10] == 1
    assert reg[380] == 1
    assert reg[210] == 1
    assert np.sum(reg > 0) == 60

def test_sample_condition_4():
    """ Test the experimental condition sampling -- negative amplitude """
    condition = ([1, 20, 36.5], [2, 2, 2], [1., -1., 5.])
    frametimes = np.linspace(0, 49, 50)
    reg, rf = _sample_condition(condition, frametimes, oversampling=1)
    assert reg.sum() == 10
    assert reg[25] == 1.
    assert reg[44] == -1.
    assert reg[61] == 5.

def test_sample_condition_5():
    """ Test the experimental condition sampling -- negative onset """
    condition = ([-10, 0, 36.5], [2, 2, 2], [1., -1., 5.])
    frametimes = np.linspace(0, 49, 50)
    reg, rf = _sample_condition(condition, frametimes, oversampling=1)
    assert reg.sum() == 10
    assert reg[14] == 1.
    assert reg[24] == -1.
    assert reg[61] == 5.

def test_names():
    """ Test the regressor naming function """
    name = 'con'
    assert _regressor_names(name, 'spm') == ['con']
    assert _regressor_names(name, 'spm_time') == ['con', 'con_derivative']
    assert (_regressor_names(name, 'spm_time_dispersion') ==
            ['con', 'con_derivative', 'con_dispersion'])
    assert _regressor_names(name, 'canonical') == ['con']
    assert (_regressor_names(name, 'canonical with derivative') ==
            ['con', 'con_derivative'])

def test_hkernel():
    """ test the hrf computation """
    tr = 2.0
    h = _hrf_kernel('spm', tr)
    assert_almost_equal(h[0], spm_hrf(tr))
    assert len(h) == 1
    h = _hrf_kernel('spm_time', tr)
    assert_almost_equal(h[1], spm_time_derivative(tr))
    assert len(h) == 2
    h = _hrf_kernel('spm_time_dispersion', tr)
    assert_almost_equal(h[2], spm_dispersion_derivative(tr))
    assert len(h) == 3
    h = _hrf_kernel('canonical', tr)
    assert_almost_equal(h[0], glover_hrf(tr))
    assert len(h) == 1
    h = _hrf_kernel('canonical with derivative', tr)
    assert_almost_equal(h[1], glover_time_derivative(tr))
    assert_almost_equal(h[0], glover_hrf(tr))
    assert len(h) == 2
    h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))
    assert len(h) == 4
    for dh in h:
        assert dh.sum() == 16.
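# Illustrative sketch (not a test, so not collected by pytest): a condition is
# an (onsets, durations, amplitudes) triple; compute_regressor convolves it
# with the requested HRF model and samples the result at the frame times.
def _sketch_compute_regressor_usage():
    condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])
    frametimes = np.linspace(0, 69, 70)
    reg, reg_names = compute_regressor(condition, 'spm', frametimes)
    return reg, reg_names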
def test_make_regressor_1():
    """ test the generated regressor """
    condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1])
    frametimes = np.linspace(0, 69, 70)
    hrf_model = 'spm'
    reg, reg_names = compute_regressor(condition, hrf_model, frametimes)
    assert_almost_equal(reg.sum(), 6, 1)
    assert reg_names[0] == 'cond'

def test_make_regressor_2():
    """ test the generated regressor """
    condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])
    frametimes = np.linspace(0, 69, 70)
    hrf_model = 'spm'
    reg, reg_names = compute_regressor(condition, hrf_model, frametimes)
    assert_almost_equal(reg.sum() * 16, 3, 1)
    assert reg_names[0] == 'cond'

def test_make_regressor_3():
    """ test the generated regressor """
    condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])
    frametimes = np.linspace(0, 138, 70)
    hrf_model = 'fir'
    reg, reg_names = compute_regressor(condition, hrf_model, frametimes,
                                       fir_delays=np.arange(4))
    assert_array_equal(np.unique(reg), np.array([0, 1]))
    assert_array_equal(np.sum(reg, 0), np.array([3, 3, 3, 3]))
    assert len(reg_names) == 4

def test_design_warnings():
    """ test that warnings are correctly raised upon weird design
    specification """
    condition = ([-25, 20, 36.5], [0, 0, 0], [1, 1, 1])
    frametimes = np.linspace(0, 69, 70)
    hrf_model = 'spm'
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        pytest.warns(UserWarning, compute_regressor, condition, hrf_model,
                     frametimes)
    condition = ([-25, -25, 36.5], [0, 0, 0], [1, 1, 1])
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        pytest.warns(UserWarning, compute_regressor, condition, hrf_model,
                     frametimes)
nipy-0.6.1/nipy/modalities/fmri/tests/test_hrf.py
""" Testing hrf module """

from os.path import dirname
from os.path import join as pjoin

import numpy as np
import pytest
import scipy.io as sio
from numpy.testing import assert_almost_equal
from scipy.stats import gamma

from ..hrf import (
    ddspmt,
    dspmt,
    gamma_expr,
    gamma_params,
    lambdify_t,
    spm_hrf_compat,
    spmt,
)

def test_gamma():
    t = np.linspace(0, 30, 5000)
    # make up some numbers
    pk_t = 5.0
    fwhm = 6.0
    # get the estimated parameters
    shape, scale, coef = gamma_params(pk_t, fwhm)
    # get distribution function
    g_exp = gamma_expr(pk_t, fwhm)
    # make matching standard distribution
    gf = gamma(shape, scale=scale).pdf
    # get values
    L1t = gf(t)
    L2t = lambdify_t(g_exp)(t)
    # they are the same bar a scaling factor
    nz = np.abs(L1t) > 1e-15
    sf = np.mean(L1t[nz] / L2t[nz])
    assert_almost_equal(L1t, L2t * sf)

def test_spm_hrf():
    # Regression tests for spm hrf, time derivative and dispersion derivative
    # Check that absolute values don't change (much) with different dt, and
    # that max values are roughly the same and in the same place in time
    for dt in 0.1, 0.01, 0.001:
        t_vec = np.arange(0, 32, dt)
        hrf = spmt(t_vec)
        assert_almost_equal(np.max(hrf), 0.21053, 5)
        assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)
        dhrf = dspmt(t_vec)
        assert_almost_equal(np.max(dhrf), 0.08, 3)
        assert_almost_equal(t_vec[np.argmax(dhrf)], 3.3, 1)
        dhrf = ddspmt(t_vec)
        assert_almost_equal(np.max(dhrf), 0.10, 2)
        assert_almost_equal(t_vec[np.argmax(dhrf)], 5.7, 1)
    # Test reversed time vector to check that order of time values does not
    # affect result
    rt_vec = np.arange(0, 32, 0.01)
    rhrf = spmt(rt_vec)
    assert_almost_equal(np.max(rhrf), 0.21053, 5)
    assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)

def test_spm_hrf_octave():
    # Test SPM hrf against output from SPM code running in Octave
    my_path = dirname(__file__)
    hrfs_path = pjoin(my_path, 'spm_hrfs.mat')
    # mat file resulting from make_hrfs.m
    hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)
    params = hrfs_mat['params']
    hrfs = hrfs_mat['hrfs']
    for i, pvec in enumerate(params):
        dt, ppk, upk, pdsp, udsp, rat = pvec
        t_vec = np.arange(0, 32.1, dt)
        our_hrf = spm_hrf_compat(t_vec,
                                 peak_delay=ppk,
                                 peak_disp=pdsp,
                                 under_delay=upk,
                                 under_disp=udsp,
                                 p_u_ratio=rat)
        # Normalize integral to match SPM
        assert_almost_equal(our_hrf, hrfs[i])
    # Test basis functions
    # mat file resulting from get_td_dd.m
    bases_path = pjoin(my_path, 'spm_bases.mat')
    bases_mat = sio.loadmat(bases_path, squeeze_me=True)
    dt = bases_mat['dt']
    t_vec = np.arange(0, 32 + dt, dt)
    # SPM function divides by sum of values - revert with dt
    assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)
    assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)
    assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)

def test_spm_hrf_errors():
    t_vec = np.arange(0, 32)
    # All 1s is fine
    res = spm_hrf_compat(t_vec, 1, 1, 1, 1)
    # 0 or negative raise error for other args
    args = [0]
    for i in range(4):
        pytest.raises(ValueError, spm_hrf_compat, t_vec, *args)
        args[-1] = -1
        pytest.raises(ValueError, spm_hrf_compat, t_vec, *args)
        args[-1] = 1
        args.append(0)
nipy-0.6.1/nipy/modalities/fmri/tests/test_iterators.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#TODO the iterators are deprecated
from nipy.core.api import Image
from nipy.core.reference import coordinate_map
from nipy.modalities.fmri.api import FmriImageList

"""
Comment out since these are slated for deletion and currently are broken.
Keep for reference until generators are working.

class test_Iterators(TestCase):

    def setUp(self):
        spacetime = ['time', 'zspace', 'yspace', 'xspace']
        im = Image(np.zeros((3,4,5,6)),
                   coordinate_map = coordinate_map.CoordinateMap.identity((3,4,5,6), spacetime))
        self.img = FmriImageList(im)

    def test_fmri_parcel(self):
        parcelmap = np.zeros(self.img.shape[1:])
        parcelmap[0,0,0] = 1
        parcelmap[1,1,1] = 1
        parcelmap[2,2,2] = 1
        parcelmap[1,2,1] = 2
        parcelmap[2,3,2] = 2
        parcelmap[0,1,0] = 2
        parcelseq = (0, 1, 2, 3)
        expected = [np.prod(self.img.shape[1:]) - 6, 3, 3, 0]
        iterator = parcel_iterator(self.img, parcelmap, parcelseq)
        for i, slice_ in enumerate(iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
        iterator = parcel_iterator(self.img, parcelmap)
        for i, slice_ in enumerate(iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)

    def test_fmri_parcel_write(self):
        parcelmap = np.zeros(self.img.shape[1:])
        parcelmap[0,0,0] = 1
        parcelmap[1,1,1] = 1
        parcelmap[2,2,2] = 1
        parcelmap[1,2,1] = 2
        parcelmap[2,3,2] = 2
        parcelmap[0,1,0] = 2
        parcelseq = (0, 1, 2, 3)
        expected = [np.prod(self.img.shape[1:]) - 6, 3, 3, 0]
        iterator = parcel_iterator(self.img, parcelmap, parcelseq, mode='w')
        for i, slice_ in enumerate(iterator):
            value = np.asarray([np.arange(expected[i])
                                for _ in range(self.img.shape[0])])
            slice_.set(value)
        iterator = parcel_iterator(self.img, parcelmap, parcelseq)
        for i, slice_ in enumerate(iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
            assert_equal(slice_, np.asarray([np.arange(expected[i])
                                             for _ in range(self.img.shape[0])]))
        iterator = parcel_iterator(self.img, parcelmap, mode='w')
        for i, slice_ in enumerate(iterator):
            value = np.asarray([np.arange(expected[i])
                                for _ in range(self.img.shape[0])])
            slice_.set(value)
        iterator = parcel_iterator(self.img, parcelmap)
        for i, slice_ in enumerate(iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
            assert_equal(slice_, np.asarray([np.arange(expected[i])
                                             for _ in range(self.img.shape[0])]))
    def test_fmri_parcel_copy(self):
        parcelmap = np.zeros(self.img.shape[1:])
        parcelmap[0,0,0] = 1
        parcelmap[1,1,1] = 1
        parcelmap[2,2,2] = 1
        parcelmap[1,2,1] = 2
        parcelmap[2,3,2] = 2
        parcelmap[0,1,0] = 2
        parcelseq = (0, 1, 2, 3)
        expected = [np.prod(self.img.shape[1:]) - 6, 3, 3, 0]
        iterator = parcel_iterator(self.img, parcelmap, parcelseq)
        tmp = FmriImageList(self.img[:] * 1., self.img.coordmap)
        new_iterator = iterator.copy(tmp)
        for i, slice_ in enumerate(new_iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
        iterator = parcel_iterator(self.img, parcelmap)
        for i, slice_ in enumerate(new_iterator):
            self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)

    def test_fmri_sliceparcel(self):
        parcelmap = np.asarray([[[0,0,0,1,2,2]]*5,
                                [[0,0,1,1,2,2]]*5,
                                [[0,0,0,0,2,2]]*5])
        parcelseq = ((1, 2), 0, 2)
        iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq)
        for i, slice_ in enumerate(iterator):
            pm = parcelmap[i]
            ps = parcelseq[i]
            try:
                x = len([n for n in pm.flat if n in ps])
            except TypeError:
                x = len([n for n in pm.flat if n == ps])
            self.assertEqual(x, slice_.shape[1])
            self.assertEqual(self.img.shape[0], slice_.shape[0])

    def test_fmri_sliceparcel_write(self):
        parcelmap = np.asarray([[[0,0,0,1,2,2]]*5,
                                [[0,0,1,1,2,2]]*5,
                                [[0,0,0,0,2,2]]*5])
        parcelseq = ((1, 2), 0, 2)
        iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq, mode='w')
        for i, slice_ in enumerate(iterator):
            pm = parcelmap[i]
            ps = parcelseq[i]
            try:
                x = len([n for n in pm.flat if n in ps])
            except TypeError:
                x = len([n for n in pm.flat if n == ps])
            value = [i*np.arange(x) for i in range(self.img.shape[0])]
            slice_.set(value)
        iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq)
        for i, slice_ in enumerate(iterator):
            pm = parcelmap[i]
            ps = parcelseq[i]
            try:
                x = len([n for n in pm.flat if n in ps])
            except TypeError:
                x = len([n for n in pm.flat if n == ps])
            value = [i*np.arange(x) for i in range(self.img.shape[0])]
            self.assertEqual(x, slice_.shape[1])
            self.assertEqual(self.img.shape[0], slice_.shape[0])
            assert_equal(slice_, value)
"""
nipy-0.6.1/nipy/modalities/fmri/tests/test_paradigm.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the experimental paradigm utilities.

Note that the tests just check that the produced data has the correct
dimensions, not that it is exact.
"""

import numpy as np

from ..experimental_paradigm import (
    BlockParadigm,
    EventRelatedParadigm,
    load_paradigm_from_csv_file,
)

def basic_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    paradigm = EventRelatedParadigm(conditions, onsets)
    return paradigm

def modulated_block_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    duration = 5 + 5 * np.random.rand(len(onsets))
    values = np.random.rand(len(onsets))
    paradigm = BlockParadigm(conditions, onsets, duration, values)
    return paradigm

def modulated_event_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    values = np.random.rand(len(onsets))
    paradigm = EventRelatedParadigm(conditions, onsets, values)
    return paradigm

def block_paradigm():
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    duration = 5 * np.ones(9)
    paradigm = BlockParadigm(conditions, onsets, duration)
    return paradigm

def write_paradigm(paradigm, session):
    """Function to write a paradigm to a file and return the address
    """
    import tempfile
    csvfile = tempfile.mkdtemp() + '/paradigm.csv'
    paradigm.write_to_csv(csvfile, session)
    return csvfile

def test_read_paradigm():
    """ test that a paradigm is correctly read """
    session = 'sess'
    paradigm = block_paradigm()
    csvfile = write_paradigm(paradigm, session)
    read_paradigm = load_paradigm_from_csv_file(csvfile)[session]
    assert (read_paradigm.onset == paradigm.onset).all()
    paradigm = modulated_event_paradigm()
    csvfile = write_paradigm(paradigm, session)
    read_paradigm = load_paradigm_from_csv_file(csvfile)[session]
    assert (read_paradigm.onset == paradigm.onset).all()
    paradigm = modulated_block_paradigm()
    csvfile = write_paradigm(paradigm, session)
    read_paradigm = load_paradigm_from_csv_file(csvfile)[session]
    assert (read_paradigm.onset == paradigm.onset).all()
    paradigm = basic_paradigm()
    csvfile = write_paradigm(paradigm, session)
    read_paradigm = load_paradigm_from_csv_file(csvfile)[session]
    assert (read_paradigm.onset == paradigm.onset).all()

def test_paradigm_with_int_condition_ids():
    paradigm1 = basic_paradigm()
    conditions = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    paradigm2 = EventRelatedParadigm(conditions, paradigm1.onset)
    assert (paradigm2.con_id == np.array(conditions).astype('str')).all()
nipy-0.6.1/nipy/modalities/fmri/tests/test_realfuncs.py
""" Testing realfuncs module """

from itertools import product
from os.path import dirname
from os.path import join as pjoin

import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal

from ..realfuncs import dct_ii_basis, dct_ii_cut_basis

HERE = dirname(__file__)
sq_col_lengths[0] = N assert_almost_equal(our_dct.T.dot(our_dct), np.diag(sq_col_lengths)) col_lengths = np.sqrt(sq_col_lengths) assert_almost_equal(our_dct / col_lengths, spm_mtx) # Normalize length our_normed_dct = dct_ii_basis(vol_times, normcols=True) assert_almost_equal(our_normed_dct, spm_mtx) assert_almost_equal(our_normed_dct.T.dot(our_normed_dct), np.eye(N)) for i in range(N): assert_almost_equal(dct_ii_basis(vol_times, i) / col_lengths[:i], spm_mtx[:, :i]) assert_almost_equal(dct_ii_basis(vol_times, i, True), spm_mtx[:, :i]) vol_times[0] += 0.1 pytest.raises(ValueError, dct_ii_basis, vol_times) def test_dct_ii_cut_basis(): # DCT-II basis with cut frequency for dt, cut_period, N in product((0.1, 1.1), (10.1, 20.1), (20, 100, 1000)): times = np.arange(N) * dt order = int(np.floor(2 * N * 1./ cut_period * dt)) dct_vals = dct_ii_cut_basis(times, cut_period) if order == 0: assert_array_equal(dct_vals, np.ones((N, 1))) continue dct_expected = np.ones((N, order)) dct_expected[:, :-1] = dct_ii_basis(times, order, normcols=True)[:, 1:] assert_array_equal(dct_vals, dct_expected) nipy-0.6.1/nipy/modalities/fmri/tests/test_utils.py000066400000000000000000000254471470056100100224640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing fmri utils """ import re import numpy as np import pytest import sympy from numpy.testing import ( assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) from sympy import DiracDelta, Dummy, Function, Symbol from sympy.utilities.lambdify import implemented_function, lambdify from nipy.algorithms.statistics.formula import Term from .. import hrf from ..utils import ( Interp1dNumeric, TimeConvolver, blocks, convolve_functions, define, events, interp, lambdify_t, linear_interp, step_function, ) t = Term('t') def test_define(): expr = sympy.exp(3*t) assert str(expr) == 'exp(3*t)' newf = define('f', expr) assert str(newf) == 'f(t)' f = lambdify_t(newf) tval = np.random.standard_normal((3,)) assert_almost_equal(np.exp(3*tval), f(tval)) def test_events(): # test events utility function h = Function('hrf') evs = events([3,6,9]) assert (DiracDelta(-9 + t) + DiracDelta(-6 + t) + DiracDelta(-3 + t) == evs) evs = events([3,6,9], f=h) assert h(-3 + t) + h(-6 + t) + h(-9 + t) == evs # make some beta symbols b = [Dummy('b%d' % i) for i in range(3)] a = Symbol('a') p = b[0] + b[1]*a + b[2]*a**2 evs = events([3,6,9], amplitudes=[2,1,-1], g=p) assert ((2*b[1] + 4*b[2] + b[0])*DiracDelta(-3 + t) + (-b[1] + b[0] + b[2])*DiracDelta(-9 + t) + (b[0] + b[1] + b[2])*DiracDelta(-6 + t) == evs) evs = events([3,6,9], amplitudes=[2,1,-1], g=p, f=h) assert ((2*b[1] + 4*b[2] + b[0])*h(-3 + t) + (-b[1] + b[0] + b[2])*h(-9 + t) + (b[0] + b[1] + b[2])*h(-6 + t) == evs) # test no error for numpy int arrays onsets = np.array([30, 70, 100], dtype=np.int64) evs = events(onsets, f=hrf.glover) def test_interp(): times = [0,4,5.] 
values = [2.,4,6] for int_func in (interp, linear_interp): s = int_func(times, values, np.nan) tval = np.array([-0.1,0.1,3.9,4.1,5.1]) res = lambdify(t, s)(tval) assert_array_equal(np.isnan(res), [True, False, False, False, True]) assert_array_almost_equal(res[1:-1], [2.05, 3.95, 4.2]) # default is zero fill s = int_func(times, values) res = lambdify(t, s)(tval) assert_array_almost_equal(res, [0, 2.05, 3.95, 4.2, 0]) # Can be some other value s = int_func(times, values, fill=10) res = lambdify(t, s)(tval) assert_array_almost_equal(res, [10, 2.05, 3.95, 4.2, 10]) # If fill is None, raises error on interpolation outside bounds s = int_func(times, values, fill=None) f = lambdify(t, s) assert_array_almost_equal(f(tval[1:-1]), [2.05, 3.95, 4.2]) pytest.raises(ValueError, f, tval[:-1]) # specifying kind as linear is OK s = linear_interp(times, values, kind='linear') # bounds_check should match fill int_func(times, values, bounds_error=False) int_func(times, values, fill=None, bounds_error=True) pytest.raises(ValueError, int_func, times, values, bounds_error=True) # fill should match fill value int_func(times, values, fill=10, fill_value=10) int_func(times, values, fill_value=0) pytest.raises(ValueError, int_func, times, values, fill=10, fill_value=9) int_func(times, values, fill=np.nan, fill_value=np.nan) pytest.raises(ValueError, int_func, times, values, fill=10, fill_value=np.nan) pytest.raises(ValueError, int_func, times, values, fill=np.nan, fill_value=0) def test_linear_inter_kind(): with pytest.raises(ValueError): linear_interp([0, 1], [1, 2], kind='cubic') def test_step_function(): # test step function # step function is a function of t s = step_function([0,4,5],[2,4,6]) tval = np.array([-0.1,0,3.9,4,4.1,5.1]) lam = lambdify(t, s) assert_array_equal(lam(tval), [0, 2, 2, 4, 4, 6]) s = step_function([0,4,5],[4,2,1]) lam = lambdify(t, s) assert_array_equal(lam(tval), [0, 4, 4, 2, 2, 1]) # Name default assert not re.match(r'step\d+\(t\)$', str(s)) is None # Name reloaded s = step_function([0,4,5],[4,2,1], name='goodie_goodie_yum_yum') assert str(s) == 'goodie_goodie_yum_yum(t)' def test_blocks(): on_off = [[1,2],[3,4]] tval = np.array([0.4,1.4,2.4,3.4]) b = blocks(on_off) lam = lambdify(t, b) assert_array_equal(lam(tval), [0, 1, 0, 1]) b = blocks(on_off, amplitudes=[3,5]) lam = lambdify(t, b) assert_array_equal(lam(tval), [0, 3, 0, 5]) # Check what happens with names # Default is from step function assert not re.match(r'step\d+\(t\)$', str(b)) is None # Can pass in another b = blocks(on_off, name='funky_chicken') assert str(b) == 'funky_chicken(t)' def numerical_convolve(func1, func2, interval, dt): mni, mxi = interval time = np.arange(mni, mxi, dt) vec1 = func1(time).astype(float) vec2 = func2(time).astype(float) value = np.convolve(vec1, vec2) * dt min_s = min(time.size, value.size) time = time[:min_s] value = value[:min_s] return time, value def test_convolve_functions(): # replicate convolution # This is a square wave on (0,1) f1 = sympy.Piecewise((0, t <= 0), (1, t < 1), (0, True)) # ff1 is the numerical implementation of same ff1 = lambdify(t, f1) # Time delta dt = 1e-3 # Numerical convolution to test against # The convolution of ``f1`` with itself is a triangular wave on [0, 2], # peaking at 1 with height 1 time, value = numerical_convolve(ff1, ff1, [0, 2], dt) # shells to wrap convolve kernel version def kern_conv1(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None): kern = TimeConvolver(f1, f1_interval, dt, fill) return kern.convolve(f2, f2_interval, name=name) def 
kern_conv2(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None): kern = TimeConvolver(f2, f2_interval, dt, fill) return kern.convolve(f1, f1_interval, name=name) for cfunc in (convolve_functions, kern_conv1, kern_conv2): tri = cfunc(f1, f1, [0, 2], [0, 2], dt, name='conv') assert str(tri) == 'conv(t)' ftri = lambdify(t, tri) y = ftri(time) # numerical convolve about the same as ours assert_array_almost_equal(value, y) # peak is at 1 assert_array_almost_equal(time[np.argmax(y)], 1) # Flip the interval and get the same result for seq1, seq2 in (((0, 2), (2, 0)), ((2, 0), (0, 2)), ((2, 0), (2, 0))): tri = cfunc(f1, f1, seq1, seq2, dt) ftri = lambdify(t, tri) y = ftri(time) assert_array_almost_equal(value, y) # offset square wave by 1 - offset triangle by 1 f2 = sympy.Piecewise((0, t <= 1), (1, t < 2), (0, True)) tri = cfunc(f1, f2, [0, 3], [0, 3], dt) ftri = lambdify(t, tri) o1_time = np.arange(0, 3, dt) z1s = np.zeros(int(round(1./dt))) assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value]) # Same for input function tri = cfunc(f2, f1, [0, 3], [0, 3], dt) ftri = lambdify(t, tri) assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value]) # 2 seconds for both tri = cfunc(f2, f2, [0, 4], [0, 4], dt) ftri = lambdify(t, tri) o2_time = np.arange(0, 4, dt) assert_array_almost_equal(ftri(o2_time), np.r_[z1s, z1s, value]) # offset by -0.5 - offset triangle by -0.5 f3 = sympy.Piecewise((0, t <= -0.5), (1, t < 0.5), (0, True)) tri = cfunc(f1, f3, [0, 2], [-0.5, 1.5], dt) ftri = lambdify(t, tri) o1_time = np.arange(-0.5, 1.5, dt) assert_array_almost_equal(ftri(o1_time), value) # Same for input function tri = cfunc(f3, f1, [-0.5, 1.5], [0, 2], dt) ftri = lambdify(t, tri) assert_array_almost_equal(ftri(o1_time), value) # -1 second for both tri = cfunc(f3, f3, [-0.5, 1.5], [-0.5, 1.5], dt) ftri = lambdify(t, tri) o2_time = np.arange(-1, 1, dt) assert_array_almost_equal(ftri(o2_time), value) # Check it's OK to be off the dt grid tri = cfunc(f1, f1, [dt/2, 2 + dt/2], [0, 2], dt, name='conv') ftri = lambdify(t, tri) assert_array_almost_equal(ftri(time), value, 3) # Check fill value nan_tri = cfunc(f1, f1, [0, 2], [0, 2], dt, fill=np.nan) nan_ftri = lambdify(t, nan_tri) y = nan_ftri(time) assert_array_equal(y, value) assert np.all(np.isnan(nan_ftri(np.arange(-2, 0)))) assert np.all(np.isnan(nan_ftri(np.arange(4, 6)))) # The original fill value was 0 assert_array_equal(ftri(np.arange(-2, 0)), 0) assert_array_equal(ftri(np.arange(4, 6)), 0) def test_interp1d_numeric(): # Test wrapper for interp1d # See: https://github.com/sympy/sympy/issues/10810 # # Test TypeError raised for object func = Interp1dNumeric(range(10), range(10)) # Numeric values OK assert_almost_equal(func([1, 2, 3]), [1, 2, 3]) assert_almost_equal(func([1.5, 2.5, 3.5]), [1.5, 2.5, 3.5]) # Object values raise TypeError pytest.raises(TypeError, func, t) # Check it works as expected via sympy sym_func = implemented_function('func', func) f = sym_func(t - 2) assert_almost_equal(lambdify_t(f)(4.5), 2.5) for val in (2, 2.): f = sym_func(val) # Input has no effect assert_almost_equal(lambdify_t(f)(-100), 2) assert_almost_equal(lambdify_t(f)(-1000), 2) # Float expression f = sym_func(t - 2.) 
assert_almost_equal(lambdify_t(f)(4.5), 2.5) def test_convolve_hrf(): # Check that using an HRF convolution on two events is the same as doing the # same convolution on each of them and summing t = Term('t') glover_t = hrf.glover(t) # dt is exactly representable in floating point here glover_conv = TimeConvolver(glover_t, [0, 26], 1./32) event0 = blocks(((1, 2),), (2.,)) event1 = blocks(((15, 20),), (1.,)) conved0 = glover_conv.convolve(event0, [0, 3]) conved1 = glover_conv.convolve(event1, [14, 21]) times = np.arange(0, 50, 0.1) e0_e1 = lambdify_t(conved0)(times) + lambdify_t(conved1)(times) # Same thing in one shot events = blocks(((1, 2), (15, 20)), (2., 1)) conved = glover_conv.convolve(events, [0, 21]) assert_almost_equal(lambdify_t(conved)(times), e0_e1) # Same thing with convolve_functions conved_cf = convolve_functions(events, glover_t, [0, 21], [0, 26], 1/32.) assert_almost_equal(lambdify_t(conved_cf)(times), e0_e1) nipy-0.6.1/nipy/modalities/fmri/utils.py000066400000000000000000000421331470056100100202520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module defines some convenience functions of time. interp : an expression for an interpolated function of time linear_interp : an expression for a linearly interpolated function of time step_function : an expression for a step function of time events : a convenience function to generate sums of events blocks : a convenience function to generate sums of blocks convolve_functions : numerically convolve two functions of time fourier_basis : a convenience function to generate a Fourier basis """ import itertools import numpy as np import sympy from scipy.interpolate import interp1d from sympy import DiracDelta, Symbol from sympy.utilities.lambdify import implemented_function, lambdify from nipy.algorithms.statistics.formula.formulae import Formula, Term # Legacy repr printing from numpy. T = Term('t') class Interp1dNumeric(interp1d): """ Wrapper for interp1d to raise TypeError for object array input We need this because sympy will try to evaluate interpolated functions when constructing expressions involving floats. At least sympy 1.0 only accepts TypeError or AttributeError as indication that the implemented value cannot be sampled with the sympy expression. Therefore, raise a TypeError directly for an input giving an object array (such as a sympy expression), rather than letting interp1d raise a ValueError. See: * https://github.com/nipy/nipy/issues/395 * https://github.com/sympy/sympy/issues/10810 """ def __call__(self, x): if np.asarray(x).dtype.type == np.object_: raise TypeError('Object arrays not supported') return super().__call__(x) def lambdify_t(expr): ''' Return sympy function of t `expr` lambdified as function of t Parameters ---------- expr : sympy expr Returns ------- func : callable Numerical implementation of function ''' return lambdify(T, expr, "numpy") def define(name, expr): """ Create function of t expression from arbitrary expression `expr` Take an arbitrarily complicated expression `expr` of 't' and make it an expression that is a simple function of t, of form ``'%s(t)' % name`` such that when it evaluates (via ``lambdify``) it has the right values.
Parameters ---------- expr : sympy expression with only 't' as a Symbol name : str Returns ------- nexpr : sympy expression Examples -------- >>> t = Term('t') >>> expr = t**2 + 3*t >>> expr t**2 + 3*t >>> newexpr = define('f', expr) >>> print(newexpr) f(t) >>> f = lambdify_t(newexpr) >>> f(4) 28 >>> 3*4+4**2 28 """ # make numerical implementation of expression v = lambdify(T, expr, "numpy") # convert numerical implementation to sympy function f = implemented_function(name, v) # Return expression that is function of time return f(T) def fourier_basis(freq): """ sin and cos Formula for Fourier drift The Fourier basis consists of sine and cosine waves of given frequencies. Parameters ---------- freq : sequence of float Frequencies for the terms in the Fourier basis. Returns ------- f : Formula Examples -------- >>> f=fourier_basis([1,2,3]) >>> f.terms array([cos(2*pi*t), sin(2*pi*t), cos(4*pi*t), sin(4*pi*t), cos(6*pi*t), sin(6*pi*t)], dtype=object) >>> f.mean _b0*cos(2*pi*t) + _b1*sin(2*pi*t) + _b2*cos(4*pi*t) + _b3*sin(4*pi*t) + _b4*cos(6*pi*t) + _b5*sin(6*pi*t) """ r = [] for f in freq: r += [sympy.cos(2*sympy.pi*f*T), sympy.sin(2*sympy.pi*f*T)] return Formula(r) def interp(times, values, fill=0, name=None, **kw): r""" Generic interpolation function of t given `times` and `values` Interpolator such that: f(times[i]) = values[i] if t < times[0] or t > times[-1]: f(t) = fill See ``scipy.interpolate.interp1d`` for details of interpolation types and other keyword arguments. The default 'kind' is linear, making this function, by default, have the same behavior as ``linear_interp``. Parameters ---------- times : array-like Increasing sequence of times values : array-like Values at the specified times fill : None or float, optional Value on the interval (-np.inf, times[0]). Default 0. If None, raises error outside bounds name : None or str, optional Name of symbolic expression to use. If None, a default is used. \*\*kw : keyword args, optional passed to ``interp1d`` Returns ------- f : sympy expression A Function of t. Examples -------- >>> s = interp([0,4,5.],[2.,4,6]) >>> tval = np.array([-0.1,0.1,3.9,4.1,5.1]) >>> res = lambdify_t(s)(tval) 0 outside bounds by default >>> np.allclose(res, [0, 2.05, 3.95, 4.2, 0]) True """ if fill is not None: if kw.get('bounds_error') is True: raise ValueError('fill conflicts with bounds error') fv = kw.get('fill_value') if not (fv is None or fv is fill or fv == fill): # allow for fill=np.nan raise ValueError('fill conflicts with fill_value') kw['bounds_error'] = False kw['fill_value'] = fill interpolator = Interp1dNumeric(times, values, **kw) # make a new name if none provided if name is None: name = 'interp%d' % interp.counter interp.counter += 1 s = implemented_function(name, interpolator) return s(T) interp.counter = 0 def linear_interp(times, values, fill=0, name=None, **kw): r""" Linear interpolation function of t given `times` and `values` Interpolator such that: f(times[i]) = values[i] if t < times[0] or t > times[-1]: f(t) = fill This version of the function enforces the 'linear' kind of interpolation (argument to ``scipy.interpolate.interp1d``). Parameters ---------- times : array-like Increasing sequence of times values : array-like Values at the specified times fill : None or float, optional Value on the interval (-np.inf, times[0]). Default 0. If None, raises error outside bounds name : None or str, optional Name of symbolic expression to use. If None, a default is used.
\*\*kw : keyword args, optional passed to ``interp1d`` Returns ------- f : sympy expression A Function of t. Examples -------- >>> s = linear_interp([0,4,5.],[2.,4,6]) >>> tval = np.array([-0.1,0.1,3.9,4.1,5.1]) >>> res = lambdify_t(s)(tval) 0 outside bounds by default >>> np.allclose(res, [0, 2.05, 3.95, 4.2, 0]) True """ kind = kw.get('kind') if kind is None: kw['kind'] = 'linear' elif kind != 'linear': raise ValueError('Only linear interpolation supported') return interp(times, values, fill, name, **kw) def step_function(times, values, name=None, fill=0): """ Right-continuous step function of time t Function of t such that f(times[i]) = values[i] if t < times[0]: f(t) = fill Parameters ---------- times : (N,) sequence Increasing sequence of times values : (N,) sequence Values at the specified times fill : float Value on the interval (-np.inf, times[0]) name : str Name of symbolic expression to use. If None, a default is used. Returns ------- f_t : sympy expr Sympy expression f(t) where f is a sympy implemented anonymous function of time that implements the step function. To get the numerical version of the function, use ``lambdify_t(f_t)`` Examples -------- >>> s = step_function([0,4,5],[2,4,6]) >>> tval = np.array([-0.1,3.9,4.1,5.1]) >>> lam = lambdify_t(s) >>> lam(tval) array([ 0., 2., 4., 6.]) """ if name is None: name = 'step%d' % step_function.counter step_function.counter += 1 def _imp(x): x = np.asarray(x) f = np.zeros(x.shape) + fill for time, val in zip(times, values): f[x >= time] = val return f s = implemented_function(name, _imp) return s(T) # Initialize counter for step function step_function.counter = 0 def events(times, amplitudes=None, f=DiracDelta, g=Symbol('a')): """ Return a sum of functions based on a sequence of times. Parameters ---------- times : sequence vector of onsets length $N$ amplitudes : None or sequence length $N$, optional Optional sequence of amplitudes. None (default) results in a sequence of $N$ ones f : sympy.Function, optional Optional function. Defaults to DiracDelta, can be replaced with another function, f, in which case the result is the convolution with f. g : sympy.Basic, optional Optional sympy expression as a function of the amplitudes. The amplitudes should be represented by the symbol 'a', which will be substituted by the corresponding value in `amplitudes`. Returns ------- sum_expression : Sympy.Add Sympy expression of time $t$, where onsets, as a function of $t$, have been symbolically convolved with function `f`, and any function `g` of corresponding amplitudes. Examples -------- We import some sympy stuff so we can test if we've got what we expected >>> from sympy import DiracDelta, Symbol, Function >>> from nipy.modalities.fmri.utils import T >>> evs = events([3,6,9]) >>> evs == DiracDelta(-9 + T) + DiracDelta(-6 + T) + DiracDelta(-3 + T) True >>> hrf = Function('hrf') >>> evs = events([3,6,9], f=hrf) >>> evs == hrf(-9 + T) + hrf(-6 + T) + hrf(-3 + T) True >>> evs = events([3,6,9], amplitudes=[2,1,-1]) >>> evs == -DiracDelta(-9 + T) + 2*DiracDelta(-3 + T) + DiracDelta(-6 + T) True """ e = 0 asymb = Symbol('a') if amplitudes is None: amplitudes = itertools.cycle([1]) for time, a in zip(times, amplitudes): e = e + g.subs(asymb, a) * f(T-time) return e def blocks(intervals, amplitudes=None, name=None): """ Step function based on a sequence of intervals. Parameters ---------- intervals : (S,) sequence of (2,) sequences Sequence (S0, S1, ...
S(N-1)) of sequences, where S0 (etc) are sequences of length 2, giving 'on' and 'off' times of block amplitudes : (S,) sequence of float, optional Optional amplitudes for each block. Defaults to 1. name : None or str, optional Name of the function in the resulting expression. Defaults to one created by ``utils.step_function``. Returns ------- b_of_t : sympy expr Sympy expression b(t) where b is a sympy anonymous function of time that implements the block step function Examples -------- >>> on_off = [[1,2],[3,4]] >>> tval = np.array([0.4,1.4,2.4,3.4]) >>> b = blocks(on_off) >>> lam = lambdify_t(b) >>> lam(tval) array([ 0., 1., 0., 1.]) >>> b = blocks(on_off, amplitudes=[3,5]) >>> lam = lambdify_t(b) >>> lam(tval) array([ 0., 3., 0., 5.]) """ t = [-np.inf] v = [0] if amplitudes is None: amplitudes = itertools.cycle([1]) for _t, a in zip(intervals, amplitudes): t += list(_t) v += [a, 0] t.append(np.inf) v.append(0) return step_function(t, v, name=name) def _eval_for(f, interval, dt): """ Return values of function `f` sampled over `interval` at spacing `dt` """ real_f = lambdify_t(f) f_mn, f_mx = sorted(interval) time = np.arange(f_mn, f_mx, float(dt)) # time values with support for g vals = real_f(time).astype(float) return vals def _conv_fx_gx(f_vals, g_vals, dt, min_f, min_g): """ Numerical convolution given f(x), min(x) for two functions """ vals = np.convolve(f_vals, g_vals) * dt # Full by default # f and g have been implicitly translated by -f_mn and -g_mn respectively, # because in terms of array indices, they both now start at 0. # Translate by f and g offsets time = np.arange(len(vals)) * dt + min_f + min_g return time, vals class TimeConvolver: """ Make a convolution kernel from a symbolic function of t A convolution kernel is a function with extra attributes to allow it to function as a kernel for numerical convolution (see :func:`convolve_functions`). Parameters ---------- expr : sympy expression An expression that is a function of t only. support : 2 sequence Sequence is ``(low, high)`` where expression is defined between ``low`` and ``high``, and can be assumed to be `fill` otherwise delta : float smallest change in domain of `expr` to use for numerical evaluation of `expr` fill : float, optional Value to return from sampling the convolved function outside its range. Default 0. """ def __init__(self, expr, support, delta, fill=0): self.expr = expr self.support = support self.delta = delta self.fill = fill self._vals = _eval_for(expr, self.support, self.delta) def convolve(self, g, g_interval, name=None, **kwargs): r""" Convolve sympy expression `g` with this kernel Parameters ---------- g : sympy expr An expression that is a function of t only. g_interval : (2,) sequence of floats Start and end of the interval of t over which to convolve g name : None or str, optional Name of the convolved function in the resulting expression. Defaults to one created by ``utils.interp``. \*\*kwargs : keyword args, optional Any other arguments to pass to the ``interp1d`` function in creating the numerical function for `fg`. Returns ------- fg : sympy expr A symbolic expression that is a function of t only, and that can be lambdified to produce a function returning the convolved series from an input array.
""" g_vals = _eval_for(g, g_interval, self.delta) fg_time, fg_vals = _conv_fx_gx(self._vals, g_vals, self.delta, min(self.support), min(g_interval)) return interp(fg_time, fg_vals, fill=self.fill, name=name, **kwargs) def convolve_functions(f, g, f_interval, g_interval, dt, fill=0, name=None, **kwargs): r""" Expression containing numerical convolution of `fn1` with `fn2` Parameters ---------- f : sympy expr An expression that is a function of t only. g : sympy expr An expression that is a function of t only. f_interval : (2,) sequence of float The start and end of the interval of t over which to convolve values of f g_interval : (2,) sequence of floats Start and end of the interval of t over which to convolve g dt : float Time step for discretization. We use this for creating the interpolator to form the numerical implementation fill : None or float Value to return from sampling output `fg` function outside range. name : None or str, optional Name of the convolved function in the resulting expression. Defaults to one created by ``utils.interp``. \*\*kwargs : keyword args, optional Any other arguments to pass to the ``interp1d`` function in creating the numerical function for `fg`. Returns ------- fg : sympy expr An symbolic expression that is a function of t only, and that can be lambdified to produce a function returning the convolved series from an input array. Examples -------- >>> from nipy.algorithms.statistics.formula.formulae import Term >>> t = Term('t') This is a square wave on (0,1) >>> f1 = sympy.Piecewise((0, t <= 0), (1, t < 1), (0, True)) The convolution of ``f1`` with itself is a triangular wave on [0, 2], peaking at 1 with height 1 >>> tri = convolve_functions(f1, f1, [0, 2], [0, 2], 1.0e-3, name='conv') The result is a symbolic function >>> print(tri) conv(t) Get the numerical values for a time vector >>> ftri = lambdify(t, tri) >>> x = np.arange(0, 2, 0.2) >>> y = ftri(x) The peak is at 1 >>> x[np.argmax(y)] 1.0 """ # Note that - from the doctest above - y is """ array([ -3.90255908e-16, 1.99000000e-01, 3.99000000e-01, 5.99000000e-01, 7.99000000e-01, 9.99000000e-01, 7.99000000e-01, 5.99000000e-01, 3.99000000e-01, 1.99000000e-01, 6.74679706e-16]) """ # - so the peak value is 1-dt - rather than 1 - but we get the same # result from using np.convolve - see tests. f_vals = _eval_for(f, f_interval, dt) g_vals = _eval_for(g, g_interval, dt) fg_time, fg_vals = _conv_fx_gx(f_vals, g_vals, dt, min(f_interval), min(g_interval)) return interp(fg_time, fg_vals, fill=fill, name=name, **kwargs) nipy-0.6.1/nipy/pkg_info.py000066400000000000000000000054441470056100100156230ustar00rootroot00000000000000 import configparser import os import subprocess import sys COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' def pkg_commit_hash(pkg_path): ''' Get short form of commit hash given directory `pkg_path` There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a file in INI file format, with at least one section: ``commit hash``, and two variables ``archive_subst_hash`` and ``install_hash``. The first has a substitution pattern in it which may have been filled by the execution of ``git archive`` if this is an archive generated that way. The second is filled in by the installation, if the installation is from a git archive. We get the commit hash from (in order of preference): * A substituted value in ``archive_subst_hash``; * A written commit hash value in ``install_hash``; * git's output, if we are in a git repository If all these fail, we return a not-found placeholder tuple. 
Parameters ------------- pkg_path : str directory containing package Returns --------- hash_from : str Where we got the hash from - description hash_str : str short form of hash ''' # Try and get commit from written commit text file pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) if not os.path.isfile(pth): raise OSError(f'Missing commit info file {pth}') cfg_parser = configparser.RawConfigParser() cfg_parser.read(pth) archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') if not archive_subst.startswith('$Format'): # it has been substituted return 'archive substitution', archive_subst install_subst = cfg_parser.get('commit hash', 'install_hash') if install_subst != '': return 'installation', install_subst # maybe we are in a repository proc = subprocess.Popen('git rev-parse --short HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=pkg_path, shell=True) repo_commit, _ = proc.communicate() if repo_commit: return 'repository', repo_commit.strip() return '(none found)', '' def get_pkg_info(pkg_path): ''' Return dict describing the context of this package Parameters ------------ pkg_path : str path containing __init__.py for package Returns ---------- context : dict with named parameters of interest ''' src, hsh = pkg_commit_hash(pkg_path) import numpy import nipy return { 'pkg_path': pkg_path, 'commit_source': src, 'commit_hash': hsh, 'sys_version': sys.version, 'sys_executable': sys.executable, 'sys_platform': sys.platform, 'np_version': numpy.__version__, 'nipy_version': nipy.__version__} nipy-0.6.1/nipy/testing/000077500000000000000000000000001470056100100151235ustar00rootroot00000000000000nipy-0.6.1/nipy/testing/__init__.py000066400000000000000000000020151470056100100172320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The testing directory contains a small set of imaging files to be used for doctests only. More thorough tests and example data will be stored in a nipy data packages that you can download separately. .. note: We use the ``pytest`` testing framework for tests. ``pytest`` is a dependency for the tests, but should not be a dependency for running the algorithms in the NIPY library. This file should import without ``pytest`` being present on the python path. Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> img = load_image(funcfile) >>> img.shape (17, 21, 3, 20) """ import os # Discover directory path filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) funcfile = os.path.join(basedir, 'functional.nii.gz') anatfile = os.path.join(basedir, 'anatomical.nii.gz') from numpy.testing import * from . 
import decorators as dec nipy-0.6.1/nipy/testing/anatomical.nii.gz000066400000000000000000001705241470056100100203640ustar00rootroot00000000000000[binary gzip data omitted: anatomical.nii]
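The anatomical.nii.gz and functional.nii.gz entries in this archive are binary NIfTI images, so their compressed payloads are elided here. A minimal sketch of how they are meant to be used, based only on the `nipy.testing` docstring earlier in the archive (the (17, 21, 3, 20) shape comes from that docstring):

from nipy.io.api import load_image
from nipy.testing import anatfile, funcfile

# Load the small bundled functional image used by the doctests.
func_img = load_image(funcfile)
assert func_img.shape == (17, 21, 3, 20)

# The anatomical test image loads the same way.
anat_img = load_image(anatfile)
print(anat_img.shape)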
nipy-0.6.1/nipy/testing/decorators.py000066400000000000000000000056671470056100100176540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Extend numpy's decorators to use nipy's gui and data labels. """ import functools from unittest import skipIf from nibabel.optpkg import optional_package from nipy.utils import DataError, example_data, templates matplotlib, HAVE_MPL, _ = optional_package('matplotlib') needs_mpl = skipIf(not HAVE_MPL, "Test needs matplotlib") def make_label_dec(label, ds=None): """Factory function to create a decorator that applies one or more labels.
Parameters ---------- label : str or sequence One or more labels that will be applied by the decorator to the functions it decorates. Labels are attributes of the decorated function with their value set to True. ds : str An optional docstring for the resulting decorator. If not given, a default docstring is auto-generated. Returns ------- ldec : function A decorator. Examples -------- >>> slow = make_label_dec('slow') >>> print(slow.__doc__) Labels a test as 'slow' >>> rare = make_label_dec(['slow','hard'], ... "Mix labels 'slow' and 'hard' for rare tests") >>> @rare ... def f(): pass ... >>> >>> f.slow True >>> f.hard True """ if isinstance(label, str): labels = [label] else: labels = label # Validate that the given label(s) are OK for use in setattr() by doing a # dry run on a dummy function. tmp = lambda : None for label in labels: setattr(tmp,label,True) # This is the actual decorator we'll return def decor(f): for label in labels: setattr(f,label,True) return f # Apply the user's docstring if ds is None: ds = f"Labels a test as {label!r}" decor.__doc__ = ds return decor # Nipy specific labels gui = make_label_dec('gui') data = make_label_dec('data') # For tests that need further review def needs_review(msg): """ Skip a test that needs further review. Parameters ---------- msg : string msg regarding the review that needs to be done """ def skip_func(func): return skipIf(True, msg)(func) return skip_func def if_datasource(ds, msg): try: ds.get_filename() except DataError: return skipIf(True, msg) return lambda f : f def if_templates(f): return if_datasource(templates, 'Cannot find template data')(f) def if_example_data(f): return if_datasource(example_data, 'Cannot find example data')(f) def needs_mpl_agg(func): """ Decorator requiring matplotlib with agg backend """ if not HAVE_MPL: return needs_mpl(func) import matplotlib.pyplot as plt @functools.wraps(func) def agg_func(*args, **kwargs): matplotlib.use('agg') plt.switch_backend('agg') return func(*args, **kwargs) return agg_func nipy-0.6.1/nipy/testing/functional.nii.gz000066400000000000000000001212731470056100100204130ustar00rootroot00000000000000[binary gzip data omitted: functional.nii]
nipy-0.6.1/nipy/testing/tests/000077500000000000000000000000001470056100100162655ustar00rootroot00000000000000nipy-0.6.1/nipy/testing/tests/__init__.py000066400000000000000000000000271470056100100203750ustar00rootroot00000000000000# Make tests a package
nipy-0.6.1/nipy/testing/tests/test_images.py000066400000000000000000000010321470056100100211400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Test example images '''

from numpy.testing import assert_array_equal

from nipy import load_image
from nipy.testing import funcfile


def test_dims():
    fimg = load_image(funcfile)
    # make sure time dimension is correctly set in affine
    assert_array_equal(fimg.coordmap.affine[3, 3], 2.0)
    # should follow, but also make sure affine is invertible
    ainv = fimg.coordmap.inverse
    assert not ainv is None
nipy-0.6.1/nipy/tests/000077500000000000000000000000001470056100100146105ustar00rootroot00000000000000nipy-0.6.1/nipy/tests/__init__.py000066400000000000000000000000251470056100100167160ustar00rootroot00000000000000#init for nipy/tests
nipy-0.6.1/nipy/tests/scriptrunner.py000066400000000000000000000141231470056100100177210ustar00rootroot00000000000000""" Module to help tests check script output

Provides class to be instantiated in tests that check scripts.
Usually works something like this in a test module::

    from .scriptrunner import ScriptRunner
    runner = ScriptRunner()

Then, in the tests, something like::

    code, stdout, stderr = runner.run_command(['my-script', my_arg])
    assert_equal(code, 0)
    assert_equal(stdout, b'This script ran OK')
"""
import os
import sys
from os.path import dirname, isdir, isfile, pathsep, realpath
from os.path import join as pjoin
from subprocess import PIPE, Popen

try:  # Python 2
    string_types = basestring,
except NameError:  # Python 3
    string_types = str,


def _get_package():
    """ Workaround for missing ``__package__`` in Python 3.2
    """
    if '__package__' in globals() and __package__ is not None:
        return __package__
    return __name__.split('.', 1)[0]


# Same as __package__ for Python 2.6, 2.7 and >= 3.3
MY_PACKAGE = _get_package()


def local_script_dir(script_sdir):
    """ Get local script directory if running in development dir, else None
    """
    # Check for presence of scripts in development directory.  ``realpath``
    # allows for the situation where the development directory has been
    # linked into the path.
    package_path = dirname(__import__(MY_PACKAGE).__file__)
    above_us = realpath(pjoin(package_path, '..'))
    devel_script_dir = pjoin(above_us, script_sdir)
    if isfile(pjoin(above_us, 'setup.py')) and isdir(devel_script_dir):
        return devel_script_dir
    return None


def local_module_dir(module_name):
    """ Get local module directory if running in development dir, else None
    """
    mod = __import__(module_name)
    containing_path = dirname(dirname(realpath(mod.__file__)))
    if containing_path == realpath(os.getcwd()):
        return containing_path
    return None


class ScriptRunner:
    """ Class to run scripts and return output

    Finds local scripts and local modules if running in the development
    directory, otherwise finds system scripts and modules.
    """
    def __init__(self,
                 script_sdir='scripts',
                 module_sdir=MY_PACKAGE,
                 debug_print_var=None,
                 output_processor=lambda x: x
                 ):
        """ Init ScriptRunner instance

        Parameters
        ----------
        script_sdir : str, optional
            Name of subdirectory in top-level directory (directory containing
            setup.py), to find scripts in development tree.  Typically
            'scripts', but might be 'bin'.
        module_sdir : str, optional
            Name of subdirectory in top-level directory (directory containing
            setup.py), to find main package directory.
        debug_print_var : str, optional
            Name of environment variable that indicates whether to do debug
            printing or not.
        output_processor : callable
            Callable to run on the stdout, stderr outputs before returning
            them.  Use this to convert bytes to unicode, strip whitespace,
            etc.
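
        Examples
        --------
        A minimal sketch of construction (argument values illustrative only,
        mirroring the test modules in this package)::

            runner = ScriptRunner(debug_print_var='NIPY_DEBUG_PRINT',
                                  output_processor=lambda b: b.decode())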
""" self.local_script_dir = local_script_dir(script_sdir) self.local_module_dir = local_module_dir(module_sdir) if debug_print_var is None: debug_print_var = f'{module_sdir.upper()}_DEBUG_PRINT' self.debug_print = os.environ.get(debug_print_var, False) self.output_processor = output_processor def run_command(self, cmd, check_code=True): """ Run command sequence `cmd` returning exit code, stdout, stderr Parameters ---------- cmd : str or sequence string with command name or sequence of strings defining command check_code : {True, False}, optional If True, raise error for non-zero return code Returns ------- returncode : int return code from execution of `cmd` stdout : bytes (python 3) or str (python 2) stdout from `cmd` stderr : bytes (python 3) or str (python 2) stderr from `cmd` """ if isinstance(cmd, string_types): cmd = [cmd] else: cmd = list(cmd) if self.local_script_dir is not None: # Windows can't run script files without extensions natively so we need # to run local scripts (no extensions) via the Python interpreter. On # Unix, we might have the wrong incantation for the Python interpreter # in the hash bang first line in the source file. So, either way, run # the script through the Python interpreter cmd = [sys.executable, pjoin(self.local_script_dir, cmd[0])] + cmd[1:] if os.name == 'nt': # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be files paths with spaces. # On Unix the list elements are each separate arguments. cmd = [f'"{c}"' if ' ' in c else c for c in cmd] if self.debug_print: print(f"Running command '{cmd}'") env = os.environ if self.local_module_dir is not None: # module likely comes from the current working directory. We might need # that directory on the path if we're running the scripts from a # temporary directory env = env.copy() pypath = env.get('PYTHONPATH', None) if pypath is None: env['PYTHONPATH'] = self.local_module_dir else: env['PYTHONPATH'] = self.local_module_dir + pathsep + pypath proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env) stdout, stderr = proc.communicate() if proc.poll() is None: proc.terminate() if check_code and proc.returncode != 0: raise RuntimeError( f"""Command "{cmd}" failed with stdout ------ {stdout} stderr ------ {stderr} """) opp = self.output_processor return proc.returncode, opp(stdout), opp(stderr) nipy-0.6.1/nipy/tests/test_scripts.py000066400000000000000000000117011470056100100177100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test scripts Run scripts and test output """ import os from os.path import isfile from os.path import join as pjoin from unittest import skipIf import numpy as np import pytest from nibabel.optpkg import optional_package from numpy.testing import assert_almost_equal from nipy import load_image, save_image from nipy.core.api import rollimg from nipy.testing.decorators import make_label_dec from ..testing import funcfile matplotlib, HAVE_MPL, _ = optional_package('matplotlib') needs_mpl = skipIf(not HAVE_MPL, "Test needs matplotlib") script_test = make_label_dec('script_test') from .scriptrunner import ScriptRunner runner = ScriptRunner( debug_print_var = 'NIPY_DEBUG_PRINT') run_command = runner.run_command @needs_mpl @script_test def test_nipy_diagnose(in_tmp_path): # Test nipy diagnose script fimg = load_image(funcfile) ncomps = 12 cmd = ['nipy_diagnose', funcfile, f'--ncomponents={ncomps}', '--out-path=' + str(in_tmp_path)] run_command(cmd) for 
out_fname in ('components_functional.png', 'pcnt_var_functional.png', 'tsdiff_functional.png', 'vectors_components_functional.npz'): assert isfile(out_fname) for out_img in ('max_functional.nii.gz', 'mean_functional.nii.gz', 'min_functional.nii.gz', 'std_functional.nii.gz'): img = load_image(out_img) assert img.shape == fimg.shape[:-1] del img pca_img = load_image('pca_functional.nii.gz') assert pca_img.shape == fimg.shape[:-1] + (ncomps,) vecs_comps = np.load('vectors_components_functional.npz') vec_diff = vecs_comps['slice_mean_diff2'].copy()# just in case assert vec_diff.shape == (fimg.shape[-1]-1, fimg.shape[2]) # Check we can pass in slice and time flags s0_img = rollimg(fimg, 'k') save_image(s0_img, 'slice0.nii') cmd = ['nipy_diagnose', 'slice0.nii', f'--ncomponents={ncomps}', '--out-path=' + str(in_tmp_path), '--time-axis=t', '--slice-axis=0'] run_command(cmd) pca_img = load_image('pca_slice0.nii') assert pca_img.shape == s0_img.shape[:-1] + (ncomps,) vecs_comps = np.load('vectors_components_slice0.npz') assert_almost_equal(vecs_comps['slice_mean_diff2'], vec_diff) del pca_img, vecs_comps @needs_mpl @script_test def test_nipy_tsdiffana(in_tmp_path): # Test nipy_tsdiffana script out_png = 'ts_out.png' # Quotes in case of space in arguments for i, extras in enumerate(([], ['--time-axis=0'], ['--slice-axis=0'], ['--slice-axis=0', '--time-axis=1'] )): out_png = f'ts_out{i}.png' cmd = (['nipy_tsdiffana', funcfile] + extras + ['--out-file=' + out_png]) run_command(cmd) assert isfile(out_png) # Out-file and write-results incompatible cmd = (['nipy_tsdiffana', funcfile, '--out-file=' + out_png, '--write-results']) pytest.raises(RuntimeError, run_command, cmd) # Can save images cmd_root = ['nipy_tsdiffana', funcfile] os.mkdir('myresults') run_command(cmd_root + ['--out-path=myresults', '--write-results']) assert isfile(pjoin('myresults', 'tsdiff_functional.png')) assert isfile(pjoin('myresults', 'tsdiff_functional.npz')) assert isfile(pjoin('myresults', 'dv2_max_functional.nii.gz')) assert isfile(pjoin('myresults', 'dv2_mean_functional.nii.gz')) run_command(cmd_root + ['--out-path=myresults', '--write-results', '--out-fname-label=vr2']) assert isfile(pjoin('myresults', 'tsdiff_vr2.png')) assert isfile(pjoin('myresults', 'tsdiff_vr2.npz')) assert isfile(pjoin('myresults', 'dv2_max_vr2.nii.gz')) assert isfile(pjoin('myresults', 'dv2_mean_vr2.nii.gz')) @script_test def test_nipy_3_4d(in_tmp_path): # Test nipy_3dto4d and nipy_4dto3d fimg = load_image(funcfile) N = fimg.shape[-1] out_4d = 'func4d.nii' cmd = ['nipy_4dto3d', funcfile, '--out-path=' + str(in_tmp_path)] run_command(cmd) imgs_3d = ['functional_%04d.nii' % i for i in range(N)] for iname in imgs_3d: assert isfile(iname) cmd = ['nipy_3dto4d'] + imgs_3d + ['--out-4d=' + out_4d] run_command(cmd) fimg_back = load_image(out_4d) assert_almost_equal(fimg.get_fdata(), fimg_back.get_fdata()) del fimg_back @script_test def test_nipy_4d_realign(in_tmp_path): # Test nipy_4d_realign script # Set matplotib agg backend with open("matplotlibrc", "w") as fobj: fobj.write("backend : agg") cmd = ['nipy_4d_realign', '2.0', funcfile, '--slice_dim', '2', '--slice_dir', '-1', '--save_path', '.'] run_command(cmd) nipy-0.6.1/nipy/utils/000077500000000000000000000000001470056100100146065ustar00rootroot00000000000000nipy-0.6.1/nipy/utils/__init__.py000066400000000000000000000041461470056100100167240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General utilities 
for code support. These are modules that we (broadly-speaking) wrote; packages that other people wrote, that we ship, go in the nipy.externals tree. """ import functools import warnings import numpy as np from nibabel.data import DataError, datasource_or_bomber, make_datasource # Module level datasource instances for convenience from ..info import DATA_PKGS templates = datasource_or_bomber(DATA_PKGS['nipy-templates']) example_data = datasource_or_bomber(DATA_PKGS['nipy-data']) try: example_data.get_filename() except DataError: HAVE_EXAMPLE_DATA = False else: HAVE_EXAMPLE_DATA = True try: templates.get_filename() except DataError: HAVE_TEMPLATES = False else: HAVE_TEMPLATES = True from .utilities import is_iterable, is_numlike, seq_prod class VisibleDeprecationWarning(UserWarning): """Visible deprecation warning. Python does not show any DeprecationWarning by default. Sometimes we do want to show a deprecation warning, when the deprecation is urgent, or the usage is probably a bug. """ class _NoValue: """Special keyword value. This class may be used as the default value assigned to a deprecated keyword in order to check if it has been given a user defined value. """ # Numpy sctypes (np.sctypes removed in Numpy 2.0). SCTYPES = {'int': [np.int8, np.int16, np.int32, np.int64], 'uint': [np.uint8, np.uint16, np.uint32, np.uint64], 'float': [np.float16, np.float32, np.float64], 'complex': [np.complex64, np.complex128], 'others': [bool, object, bytes, str, np.void]} def deprecate_with_doc(msg): # Adapted from: https://stackoverflow.com/a/30253848/1939576 def dep(func): @functools.wraps(func) def new_func(*args, **kwargs): warnings.warn( f"{func.__name__} deprecated, {msg}", category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) return new_func return dep nipy-0.6.1/nipy/utils/arrays.py000066400000000000000000000022271470056100100164640ustar00rootroot00000000000000""" Array utilities """ import numpy as np def strides_from(shape, dtype, order='C'): """ Return strides as for continuous array shape `shape` and given `dtype` Parameters ---------- shape : sequence shape of array to calculate strides from dtype : dtype-like dtype specifier for array order : {'C', 'F'}, optional whether array is C or FORTRAN ordered Returns ------- strides : tuple sequence length ``len(shape)`` giving strides for continuous array with given `shape`, `dtype` and `order` Examples -------- >>> strides_from((2,3,4), 'i4') (48, 16, 4) >>> strides_from((3,2), np.float64) (16, 8) >>> strides_from((5,4,3), np.bool_, order='F') (1, 5, 20) """ dt = np.dtype(dtype) if dt.itemsize == 0: raise ValueError(f'Empty dtype "{dt}"') if order == 'F': strides = np.cumprod([dt.itemsize] + list(shape[:-1])) elif order == 'C': strides = np.cumprod([dt.itemsize] + list(shape)[::-1][:-1]) strides = strides[::-1] else: raise ValueError(f'Unexpected order "{order}"') return tuple(strides) nipy-0.6.1/nipy/utils/perlpie.py000066400000000000000000000075711470056100100166320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: r"""Perform a global search and replace on the current directory *recursively*. This a small python wrapper around the `perl -p -i -e` functionality. I **strongly recommend** running `perlpie` on files under source control. In this way it's easy to track your changes and if you discover your regular expression was wrong you can easily revert. 
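For a dry run from Python, the helper function defined below can be called
directly -- a sketch (pattern and replacement illustrative)::

    from nipy.utils import perlpie
    perlpie.perl_dash_pie(r'\bfoo\b', 'bar', dry_run=True)
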
I also recommend using `grin` to test your regular expressions before running
`perlpie`.

Parameters
----------
regex : regular expression
    Regular expression matching the string you want to replace
newstring : string
    The string you would like to replace the oldstring with.  Note this is
    not a regular expression but the exact string.  One exception to this
    rule is the at symbol `@`.  This has special meaning in perl, so you need
    an escape character for this.  See Examples below.

Requires
--------
perl : The underlying language we're using to perform the search and replace.

`grin <http://pypi.python.org/pypi/grin/>`_ : Grin is a tool written by
Robert Kern to wrap `grep` and `find` with python and easier command line
options.

Examples
--------
Replace all occurrences of foo with bar::

    perlpie foo bar

Replace numpy.testing with nipy's testing framework::

    perlpie 'from\s+numpy\.testing.*' 'from nipy.testing import *'

Replace all @slow decorators in my code with @dec.super_slow.  Here we have
to escape the @ symbol which has special meaning in perl::

    perlpie '\@slow' '\@dec.super_slow'

Remove all occurrences of importing make_doctest_suite::

    perlpie 'from\snipy\.utils\.testutils.*make_doctest_suite'
"""

# notes on perl-dash-pie
# perl -p -i -e 's/oldstring/newstring/g' *
# find . -name '*.html' -print0 | xargs -0 perl -pi -e 's/oldstring/newstring/g'
import subprocess
from optparse import OptionParser

usage_doc = "usage: %prog [options] regex newstring"


def check_deps():
    try:
        import grin
    except ImportError:
        print('perlpie requires grin to operate.')
        print('You can find grin in the python package index:')
        print('  http://pypi.python.org/pypi/grin/')
        return False
    # assume they have perl for now
    return True


def perl_dash_pie(oldstr, newstr, dry_run=None):
    r"""Use perl to replace the oldstr with the newstr.

    Examples
    --------
    # To replace all occurrences of 'import numpy as N' with 'import numpy as np'
    from nipy.utils import perlpie
    perlpie.perl_dash_pie(r'import\s+numpy\s+as\s+N', 'import numpy as np')
    grind | xargs perl -pi -e 's/import\s+numpy\s+as\s+N/import numpy as np/g'
    """
    if dry_run:
        cmd = f"grind | xargs perl -p -e 's/{oldstr}/{newstr}/g'"
    else:
        cmd = f"grind | xargs perl -pi -e 's/{oldstr}/{newstr}/g'"
    print(cmd)

    try:
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        msg = f"""
        Error while executing perl_dash_pie command:
        {cmd}
        Error:
        {err}
        """
        raise Exception(msg)


def print_extended_help(option, opt_str, value, parser, *args, **kwargs):
    print(__doc__)


def main():
    description = __doc__.splitlines()[0]
    usage = usage_doc
    parser = OptionParser(usage=usage, description=description)
    parser.add_option('-e', '--extended-help', action='callback',
                      callback=print_extended_help,
                      help='print extended help including examples')
    parser.add_option('-n', '--dry-run', action="store_true", dest="dry_run",
                      help='send results to stdout without modifying files')
    (options, args) = parser.parse_args()
    if not args:
        parser.print_help()
        return
    if check_deps():
        oldstr = args[0]
        newstr = args[1]
        perl_dash_pie(oldstr, newstr, options.dry_run)
nipy-0.6.1/nipy/utils/tests/000077500000000000000000000000001470056100100157505ustar00rootroot00000000000000nipy-0.6.1/nipy/utils/tests/__init__.py000066400000000000000000000022511470056100100200610ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Please write tests for all code submitted to the repository.
The code will be used by many people, and will in due course be used in live analyses, so we need to make sure that we have the best possible defenses against bugs. It also helps us think about code interfaces, and gives examples of code use that can be useful for others using the code. Python's unit testing framework (the U{unittest} module) is used to implement project tests. We use the convention that each package contains a subpackage called tests which contains modules defining test cases (subclasses of U{unittest.TestCase}) for that package. The nipy.utils.tests package contains an example test case called L{test_template.TemplateTest} to get you started writing your tests. Please try to include working test cases for all functions and classes that you contribute. Often, writing tests for your code before the code is written helps to frame your thoughts about what the code should look like. """ nipy-0.6.1/nipy/utils/tests/test_arrays.py000066400000000000000000000017771470056100100206760ustar00rootroot00000000000000""" Testing arrays module """ from itertools import chain import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from nipy.utils import SCTYPES from ..arrays import strides_from def test_strides_from(): for shape in ((3,), (2,3), (2,3,4), (5,4,3,2)): for order in 'FC': for dtype in chain.from_iterable(SCTYPES.values()): if dtype is bytes: dtype = 'S3' elif dtype is str: dtype = 'U4' elif dtype is np.void: continue exp = np.empty(shape, dtype=dtype, order=order).strides assert strides_from(shape, dtype, order) == exp pytest.raises(ValueError, strides_from, shape, np.void, order) pytest.raises(ValueError, strides_from, shape, bytes, order) pytest.raises(ValueError, strides_from, shape, str, order) pytest.raises(ValueError, strides_from, (3,2), 'f8', 'G') nipy-0.6.1/nipy/utils/tests/test_utilities.py000066400000000000000000000021521470056100100213740ustar00rootroot00000000000000""" Testing utilities module """ import numpy as np from ..utilities import is_iterable, is_numlike, seq_prod def test_is_iterable(): assert is_iterable(()) assert is_iterable([]) assert is_iterable(np.zeros(1)) assert is_iterable(np.zeros((1, 1))) assert is_iterable('') assert not is_iterable(0) assert not is_iterable(object()) def gen(): yield 1 assert is_iterable(gen()) def func(): return 1 assert not is_iterable(func) class C: def __iter__(self): return self def __next__(self): return self assert is_iterable(C()) def test_is_numlike(): for good in (1, 0, 1.1, False, True, np.zeros(1), np.zeros((3,)), 1j, np.complex128(1)): assert is_numlike(good) for bad in ('', object(), np.array(''), [], [1], (), (1,)): assert not is_numlike(bad) def test_seq_prod(): assert seq_prod(()) == 1 assert seq_prod((), 2) == 2 assert seq_prod((1,)) == 1 assert seq_prod((1, 2)) == 2 assert seq_prod((1, 2), 2) == 4 assert seq_prod((1, 2), 2.) == 4. nipy-0.6.1/nipy/utils/utilities.py000066400000000000000000000015231470056100100171740ustar00rootroot00000000000000""" Collection of utility functions and classes Some of these come from the matplotlib ``cbook`` module with thanks. 
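For example, a small sketch of the helpers defined below::

    >>> is_iterable([1, 2]), is_numlike('a string')
    (True, False)
    >>> seq_prod((2, 3, 4))
    24
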
""" from functools import reduce from operator import mul def is_iterable(obj): """ Return True if `obj` is iterable """ try: iter(obj) except TypeError: return False return True def is_numlike(obj): """ Return True if `obj` looks like a number """ try: obj + 1 except: return False return True def seq_prod(seq, initial=1): """ General product of sequence elements Parameters ---------- seq : sequence Iterable of values to multiply. initial : object, optional Initial value Returns ------- prod : object Result of ``initial * seq[0] * seq[1] .. ``. """ return reduce(mul, seq, initial) nipy-0.6.1/pyproject.toml000066400000000000000000000054121470056100100154050ustar00rootroot00000000000000[project] name = "nipy" dynamic = ['version'] license = {file = "LICENSE"} requires-python = ">=3.8" description = 'A python package for analysis of neuroimaging data' readme = 'README.rst' classifiers = ["Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering"] dependencies = [ 'numpy>=1.22', 'scipy>=1.8', 'nibabel>=3.2', 'sympy>=1.9', 'transforms3d' ] [[project.maintainers]] name = 'nipy developers' email = 'neuroimaging@python.org' [project.urls] Homepage = 'https://nipy.org/nipy' Documentation = 'http://nipy.org/nipy/documentation.html' Source = 'https://github.com/nipy/nipy' Download = 'https://pypi.org/project/nipy/#files' Tracker = 'https://github.com/nipy/nipy/issues' [project.optional-dependencies] developer = [ 'pre-commit', 'rtoml', ] docs = [ 'sphinx>=7.0', 'numpydoc>=1.6.0', 'matplotlib', 'texext', 'ipython' ] optional = [ 'matplotlib>=3', ] test = [ 'matplotlib>=3', 'pytest>=7.2', 'pytest-cov>=4.0', 'pytest-doctestplus' ] [build-system] build-backend = "mesonpy" requires = [ "meson-python>=0.13", "ninja", "setuptools", "cython>=3", # From Numpy 1.25, Numpy is always backwards compatible for any given Python # version. 
See: # https://numpy.org/doc/stable/release/1.25.0-notes.html#compiling-against-the-numpy-c-api-is-now-backwards-compatible-by-default "numpy>=1.25; python_version > '3.8'", # SPEC0-minimum as of Dec 23, 2023 "numpy==1.22; python_version <= '3.8'", ] [project.scripts] nipy_3dto4d = 'nipy.cli.img3dto4d:main' nipy_4dto3d = 'nipy.cli.img4dto3d:main' nipy_4d_realign = 'nipy.cli.realign4d:main' nipy_tsdiffana = 'nipy.cli.tsdiffana:main' nipy_diagnose = 'nipy.cli.diagnose:main' [tool.ruff] line-length = 88 [tool.ruff.lint] select = [ 'I', 'UP', 'C4', 'E713', 'PIE', 'PGH003', 'PLR0402', 'SIM101', 'SIM109', 'SIM110', 'SIM118', 'SIM2' ] [tool.spin] package = 'nipy' [tool.spin.commands] Build = [ 'spin.cmds.meson.build', 'spin.cmds.meson.test', 'spin.cmds.pip.install' ] Environments = [ 'spin.cmds.meson.ipython', 'spin.cmds.meson.python', 'spin.cmds.meson.run' ] Debug = [ 'spin.cmds.meson.gdb', 'spin.cmds.meson.lldb' ] nipy-0.6.1/requirements.txt000066400000000000000000000001551470056100100157540ustar00rootroot00000000000000# See pyproject.toml for requirement definitions numpy>=1.22 scipy>=1.8 sympy>=1.9 nibabel>=3.2 transforms3d nipy-0.6.1/setup.cfg000066400000000000000000000013051470056100100143070ustar00rootroot00000000000000[aliases] #release = egg_info -RDb '' # Make sure the sphinx docs are built each time we do a dist. #bdist = build_sphinx bdist #sdist = build_sphinx sdist # Make sure a zip file is created each time we build the sphinx docs #build_sphinx = build_sphinx zip_help [bdist_rpm] doc_files = doc # In this section specify if you want to link to external BLAS / LAPACK [lapack] # Value of 0 or False implies compile of, link to lapack_lite # Value of 1 or True will cause setup to try and link to external BLAS / # LAPACK as identified with the numpy configuration. Default is False. # The value in this file overrides the equivalent setting in the environment # variable NIPY_EXTERNAL_LAPACK. #external = False nipy-0.6.1/site.cfg.mingw32000066400000000000000000000001261470056100100154000ustar00rootroot00000000000000[DEFAULT] library_dirs = c:\mingw\lib include_dirs = c:\mingw\include libraries = g2c nipy-0.6.1/tools/000077500000000000000000000000001470056100100136275ustar00rootroot00000000000000nipy-0.6.1/tools/README000066400000000000000000000001441470056100100145060ustar00rootroot00000000000000============ Nipy Tools ============ This directory contains various tools used by us developers. nipy-0.6.1/tools/apigen.py000066400000000000000000000365621470056100100154600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Attempt to generate templates for module reference with Sphinx XXX - we exclude extension modules To include extension modules, first identify them as valid in the ``_uri2path`` method, then handle them in the ``_parse_module`` script. We get functions and classes by parsing the text of .py files. Alternatively we could import the modules for discovery, and we'd have to do that for extension modules. This would involve changing the ``_parse_module`` method to work via import and introspection, and might involve changing ``discover_modules`` (which determines which files are modules, and therefore which module URIs will be passed to ``_parse_module``). NOTE: this is a modified version of a script originally shipped with the PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed project. 
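A typical driver, mirroring ``tools/build_modref_templates.py`` later in this
repository (paths illustrative)::

    docwriter = ApiDocWriter('nipy')
    docwriter.write_api_docs('api/generated')
    docwriter.write_index('api/generated', 'gen', relative_to='api')
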
NOTE2: this script should run in Python 2 and Python 3 """ # Stdlib imports import os import re # Functions and classes class ApiDocWriter: ''' Class for automatic detection and parsing of API docs to Sphinx-parsable reST format''' # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] def __init__(self, package_name, rst_extension='.rst', package_skip_patterns=None, module_skip_patterns=None, ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] ''' if package_skip_patterns is None: package_skip_patterns = [r'\.tests$'] if module_skip_patterns is None: module_skip_patterns = [r'\.setup$', r'\._'] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns def get_package_name(self): return self._package_name def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name self.root_module = __import__(package_name) self.root_path = self.root_module.__path__[0] self.written_modules = None package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. 
old style classes return name.rstrip(':') def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace('.', os.path.sep) path = path.replace(self.package_name + os.path.sep, '') path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' relpath = dirpath.replace(self.root_path, self.package_name) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: # nothing that we could handle here. return ([],[]) f = open(filename) functions, classes = self._parse_lines(f) f.close() return functions, classes def _parse_lines(self, linesource): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- S : string Contents of API doc ''' # get the names of all classes and functions functions, classes = self._parse_module(uri) if not len(functions) and not len(classes): print('WARNING: Empty - ' + uri) # dbg return '' # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(rf'^{self.package_name}\.','',uri) ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) + '\n\n') # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri: title = 'Module: :mod:`' + uri_short + '`' else: title = ':mod:`' + uri_short + '`' ad += title + '\n' + self.rst_section_levels[2] * len(title) if len(classes): ad += f'\nInheritance diagram for ``{uri}``:\n\n' ad += f'.. inheritance-diagram:: {uri} \n' ad += ' :parts: 3\n' ad += '\n.. automodule:: ' + uri + '\n' ad += '\n.. 
currentmodule:: ' + uri + '\n' multi_class = len(classes) > 1 multi_fx = len(functions) > 1 if multi_class: ad += '\n' + 'Classes' + '\n' + \ self.rst_section_levels[2] * 7 + '\n' elif len(classes) and multi_fx: ad += '\n' + 'Class' + '\n' + \ self.rst_section_levels[2] * 5 + '\n' for c in classes: ad += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[multi_class + 2 ] * \ (len(c)+9) + '\n\n' ad += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working ad += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ ' :inherited-members:\n' \ '\n' \ ' .. automethod:: __init__\n' if multi_fx: ad += '\n' + 'Functions' + '\n' + \ self.rst_section_levels[2] * 9 + '\n\n' elif len(functions) and multi_class: ad += '\n' + 'Function' + '\n' + \ self.rst_section_levels[2] * 8 + '\n\n' for f in functions: # must NOT exclude from index to keep cross-refs working ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n' return ad def _survives_exclude(self, matchstr, match_type): r''' Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append(r'^\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append(r'^\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False ''' if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': patterns = self.package_skip_patterns else: raise ValueError(f'Cannot interpret match type "{match_type}"') # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: matchstr = matchstr[L:] for pat in patterns: try: pat.search except AttributeError: pat = re.compile(pat) if pat.search(matchstr): return False return True def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: # copy list - we modify inplace package_uri = f'{root_uri}.{dirname}' if (self._uri2path(package_uri) and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) else: dirnames.remove(dirname) # Check filenames for modules for filename in filenames: module_name = filename[:-3] module_uri = f'{root_uri}.{module_name}' if (self._uri2path(module_uri) and self._survives_exclude(module_uri, 'module')): modules.append(module_uri) return sorted(modules) def write_modules_api(self, modules,outdir): # write the list written_modules = [] for m in modules: api_str = self.generate_api_doc(m) if not api_str: continue # write out to file outfile = os.path.join(outdir, m + self.rst_extension) fileobj = open(outfile, "w") fileobj.write(api_str) fileobj.close() written_modules.append(m) self.written_modules = written_modules 
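    # Note (illustrative): after ``write_modules_api(modules, outdir)`` runs,
    # ``self.written_modules`` holds the module URIs that produced non-empty
    # API docs, e.g. ['nipy', 'nipy.algorithms', ...]; ``write_index`` below
    # relies on this list.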
def write_api_docs(self, outdir): """Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules """ if not os.path.exists(outdir): os.mkdir(outdir) # compose list of modules modules = self.discover_modules() self.write_modules_api(modules,outdir) def write_index(self, outdir, froot='gen', relative_to=None): """Make a reST API index file from written files Parameters ---------- path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to 'gen'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is. """ if self.written_modules is None: raise ValueError('No modules written') # Get full filename path path = os.path.join(outdir, froot+self.rst_extension) # Path written into index is relative to rootpath if relative_to is not None: relpath = outdir.replace(relative_to + os.path.sep, '') else: relpath = outdir idx = open(path,"w") w = idx.write w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') w('.. toctree::\n\n') for f in self.written_modules: w(f' {os.path.join(relpath, f)}\n') idx.close() nipy-0.6.1/tools/build_dmgs.py000077500000000000000000000044371470056100100163250ustar00rootroot00000000000000#!/usr/bin/env python3 """Script to build dmgs for buildbot builds Example ------- %(prog)s "nipy-dist/nipy*-0.4.0-py*mpkg" Note quotes around the globber first argument to protect it from shell globbing. 
""" import os import shutil import warnings from argparse import ArgumentParser, RawDescriptionHelpFormatter from functools import partial from glob import glob from os.path import isdir, isfile from os.path import join as pjoin from subprocess import check_call my_call = partial(check_call, shell=True) BUILDBOT_LOGIN = "buildbot@nipy.bic.berkeley.edu" BUILDBOT_HTML = "nibotmi/public_html/" def main(): parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('globber', type=str, help='glob to search for build mpkgs') parser.add_argument('--out-path', type=str, default='mpkg-dist', help='path for output files (default="mpkg-dist")', metavar='OUTPATH') parser.add_argument('--clobber', action='store_true', help='Delete OUTPATH if exists') args = parser.parse_args() globber = args.globber out_path = args.out_path address = f"{BUILDBOT_LOGIN}:{BUILDBOT_HTML}{globber}" if isdir(out_path): if not args.clobber: raise RuntimeError(f'Path {out_path} exists and "clobber" not set') shutil.rmtree(out_path) os.mkdir(out_path) cwd = os.path.abspath(os.getcwd()) os.chdir(out_path) try: my_call(f'scp -r {address} .') found_mpkgs = sorted(glob('*.mpkg')) for mpkg in found_mpkgs: pkg_name, ext = os.path.splitext(mpkg) assert ext == '.mpkg' my_call(f'sudo reown_mpkg {mpkg} root admin') os.mkdir(pkg_name) pkg_moved = pjoin(pkg_name, mpkg) os.rename(mpkg, pkg_moved) readme = pjoin(pkg_moved, 'Contents', 'Resources', 'ReadMe.txt') if isfile(readme): shutil.copy(readme, pkg_name) else: warnings.warn("Could not find readme with " + readme) my_call(f'sudo hdiutil create {pkg_name}.dmg -srcfolder ./{pkg_name}/ -ov') finally: os.chdir(cwd) if __name__ == '__main__': main() nipy-0.6.1/tools/build_modref_templates.py000077500000000000000000000017641470056100100207250ustar00rootroot00000000000000#!/usr/bin/env python3 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to auto-generate our API docs. 
This script should run in Python 2 and Python 3
"""

# stdlib imports
import os

# local imports
from apigen import ApiDocWriter

#*****************************************************************************
if __name__ == '__main__':
    package = 'nipy'
    outdir = os.path.join('api', 'generated')
    docwriter = ApiDocWriter(package)
    docwriter.package_skip_patterns += [r'\.fixes$',
                                        #r'\.labs\.viz',
                                        ]
    # XXX: Avoid nipy.modalities.fmri.aliased due to a bug in python2.6
    docwriter.module_skip_patterns += [r'\.modalities\.fmri.aliased',
                                       ]
    docwriter.write_api_docs(outdir)
    docwriter.write_index(outdir, 'gen', relative_to='api')
    print(f'{len(docwriter.written_modules)} files written')
nipy-0.6.1/tools/doctest_extmods.py000077500000000000000000000032211470056100100174120ustar00rootroot00000000000000#!/usr/bin/env python3
"""Run doctests in extension modules of <pkg_name>

Collect extension modules in <pkg_name>

Run doctests in each extension module

Example:

    %prog nipy
"""
import doctest
import os
import sys
from distutils.sysconfig import get_config_vars
from optparse import OptionParser
from os.path import abspath, dirname, relpath, sep, splitext
from os.path import join as pjoin

EXT_EXT = get_config_vars('SO')[0]


def get_ext_modules(pkg_name):
    pkg = __import__(pkg_name, fromlist=[''])
    pkg_dir = abspath(dirname(pkg.__file__))
    # pkg_root = __import__(pkg_name)
    ext_modules = []
    for dirpath, dirnames, filenames in os.walk(pkg_dir):
        reldir = relpath(dirpath, pkg_dir)
        if reldir == '.':
            reldir = ''
        for filename in filenames:
            froot, ext = splitext(filename)
            if ext == EXT_EXT:
                mod_path = pjoin(reldir, froot)
                mod_uri = pkg_name + '.' + mod_path.replace(sep, '.')
                # fromlist=[''] results in submodule being returned, rather
                # than the top level module.  See help(__import__)
                mod = __import__(mod_uri, fromlist=[''])
                ext_modules.append(mod)
    return ext_modules


def main():
    usage = "usage: %prog [options] <pkg_name>\n\n" + __doc__
    parser = OptionParser(usage=usage)
    opts, args = parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    mod_name = args[0]
    mods = get_ext_modules(mod_name)
    for mod in mods:
        print("Testing module: " + mod.__name__)
        doctest.testmod(mod)


if __name__ == '__main__':
    main()
nipy-0.6.1/tools/fix_longtable.py000077500000000000000000000011431470056100100170200ustar00rootroot00000000000000#!/usr/bin/env python3
""" Fix sphinx latex output for longtable
"""
import codecs
import re
import sys

lt_LL = re.compile(r"longtable}{(L+)}")


def replacer(match):
    args = '|' + 'l|' * len(match.groups()[0])
    return f"longtable}}{{{args}}}"

if len(sys.argv) != 2:
    raise RuntimeError("Enter path to tex file only")
file_path = sys.argv[1]
with codecs.open(file_path, 'r', encoding='utf8') as fobj:
    unfixed_tex = fobj.readlines()
with codecs.open(file_path, 'w', encoding='utf8') as fobj:
    for line in unfixed_tex:
        line = lt_LL.sub(replacer, line, 1)
        fobj.write(line)
nipy-0.6.1/tools/gitwash_dumper.py000077500000000000000000000172741470056100100172340ustar00rootroot00000000000000#!/usr/bin/env python3
''' Checkout gitwash repo into directory and do search replace on name '''
import fnmatch
import glob
import os
import re
import shutil
import sys
import tempfile
from optparse import OptionParser
from os.path import join as pjoin
from subprocess import call

verbose = False


def clone_repo(url, branch):
    cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        cmd = f'git clone {url} {tmpdir}'
        call(cmd, shell=True)
        os.chdir(tmpdir)
        cmd = f'git checkout {branch}'
        call(cmd, shell=True)
    except:
        shutil.rmtree(tmpdir)
        raise
    finally:
        os.chdir(cwd)
    return tmpdir


def
cp_files(in_path, globs, out_path): try: os.makedirs(out_path) except OSError: pass out_fnames = [] for in_glob in globs: in_glob_path = pjoin(in_path, in_glob) for in_fname in glob.glob(in_glob_path): out_fname = in_fname.replace(in_path, out_path) pth, _ = os.path.split(out_fname) if not os.path.isdir(pth): os.makedirs(pth) shutil.copyfile(in_fname, out_fname) out_fnames.append(out_fname) return out_fnames def filename_search_replace(sr_pairs, filename, backup=False): ''' Search and replace for expressions in files ''' with open(filename) as in_fh: in_txt = in_fh.read(-1) out_txt = in_txt[:] for in_exp, out_exp in sr_pairs: in_exp = re.compile(in_exp) out_txt = in_exp.sub(out_exp, out_txt) if in_txt == out_txt: return False with open(filename, "w") as out_fh: out_fh.write(out_txt) if backup: with open(filename + '.bak', "w") as bak_fh: bak_fh.write(in_txt) return True def copy_replace(replace_pairs, repo_path, out_path, cp_globs=('*',), rep_globs=('*',), renames = ()): out_fnames = cp_files(repo_path, cp_globs, out_path) renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames] fnames = [] for rep_glob in rep_globs: fnames += fnmatch.filter(out_fnames, rep_glob) if verbose: print('\n'.join(fnames)) for fname in fnames: filename_search_replace(replace_pairs, fname, False) for in_exp, out_exp in renames: new_fname, n = in_exp.subn(out_exp, fname) if n: os.rename(fname, new_fname) break def make_link_targets(proj_name, user_name, repo_name, known_link_fname, out_link_fname, url=None, ml_url=None): """ Check and make link targets If url is None or ml_url is None, check if there are links present for these in `known_link_fname`. If not, raise error. The check is: Look for a target `proj_name`. Look for a target `proj_name` + ' mailing list' Also, look for a target `proj_name` + 'github'. If this exists, don't write this target into the new file below. If we are writing any of the url, ml_url, or github address, then write new file with these links, of form: .. _`proj_name` .. _`proj_name`: url .. _`proj_name` mailing list: url """ with open(known_link_fname) as link_fh: link_contents = link_fh.readlines() have_url = not url is None have_ml_url = not ml_url is None have_gh_url = None for line in link_contents: if not have_url: match = re.match(rf'..\s+_`{proj_name}`:\s+', line) if match: have_url = True if not have_ml_url: match = re.match(rf'..\s+_`{proj_name} mailing list`:\s+', line) if match: have_ml_url = True if not have_gh_url: match = re.match(rf'..\s+_`{proj_name} github`:\s+', line) if match: have_gh_url = True if not have_url or not have_ml_url: raise RuntimeError('Need command line or known project ' 'and / or mailing list URLs') lines = [] if not url is None: lines.append(f'.. _`{proj_name}`: {url}\n') if not have_gh_url: gh_url = f'https://github.com/{user_name}/{repo_name}\n' lines.append(f'.. _`{proj_name} github`: {gh_url}\n') if not ml_url is None: lines.append(f'.. _`{proj_name} mailing list`: {ml_url}\n') if len(lines) == 0: # Nothing to do return # A neat little header line lines = [f'.. 
{proj_name}\n'] + lines
    with open(out_link_fname, "w") as out_links:
        out_links.writelines(lines)


USAGE = ''' <output_path> <project_name>

If not set with options, the repository name is the same as the
<project_name>

If not set with options, the main github user is the same as the
repository name.'''


GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git'
GITWASH_BRANCH = 'main'


def main():
    parser = OptionParser()
    parser.set_usage(parser.get_usage().strip() + USAGE)
    parser.add_option("--repo-name", dest="repo_name",
                      help="repository name - e.g. nitime",
                      metavar="REPO_NAME")
    parser.add_option("--github-user", dest="main_gh_user",
                      help="github username for main repo - e.g fperez",
                      metavar="MAIN_GH_USER")
    parser.add_option("--gitwash-url", dest="gitwash_url",
                      help=f"URL to gitwash repository - default {GITWASH_CENTRAL}",
                      default=GITWASH_CENTRAL,
                      metavar="GITWASH_URL")
    parser.add_option("--gitwash-branch", dest="gitwash_branch",
                      help=f"branch in gitwash repository - default {GITWASH_BRANCH}",
                      default=GITWASH_BRANCH,
                      metavar="GITWASH_BRANCH")
    parser.add_option("--source-suffix", dest="source_suffix",
                      help="suffix of ReST source files - default '.rst'",
                      default='.rst',
                      metavar="SOURCE_SUFFIX")
    parser.add_option("--project-url", dest="project_url",
                      help="URL for project web pages",
                      default=None,
                      metavar="PROJECT_URL")
    parser.add_option("--project-ml-url", dest="project_ml_url",
                      help="URL for project mailing list",
                      default=None,
                      metavar="PROJECT_ML_URL")
    (options, args) = parser.parse_args()
    if len(args) < 2:
        parser.print_help()
        sys.exit()
    out_path, project_name = args
    if options.repo_name is None:
        options.repo_name = project_name
    if options.main_gh_user is None:
        options.main_gh_user = options.repo_name
    repo_path = clone_repo(options.gitwash_url, options.gitwash_branch)
    try:
        copy_replace((('PROJECTNAME', project_name),
                      ('REPONAME', options.repo_name),
                      ('MAIN_GH_USER', options.main_gh_user)),
                     repo_path,
                     out_path,
                     cp_globs=(pjoin('gitwash', '*'),),
                     rep_globs=('*.rst',),
                     renames=((r'\.rst$', options.source_suffix),))
        make_link_targets(project_name,
                          options.main_gh_user,
                          options.repo_name,
                          pjoin(out_path, 'gitwash', 'known_projects.inc'),
                          pjoin(out_path, 'gitwash', 'this_project.inc'),
                          options.project_url,
                          options.project_ml_url)
    finally:
        shutil.rmtree(repo_path)


if __name__ == '__main__':
    main()
nipy-0.6.1/tools/perlpie000077500000000000000000000002301470056100100152100ustar00rootroot00000000000000#!/usr/bin/env python3
"""Utility script for performing global search and replace with `perl -p -i -e`
"""
from nipy.utils import perlpie

perlpie.main()
nipy-0.6.1/tools/run_log_examples.py000077500000000000000000000132731470056100100175530ustar00rootroot00000000000000#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
DESCRIP = 'Run and log examples'
EPILOG = \
"""
Run examples in directory

Typical usage is:

run_log_examples.py nipy/examples --log-path=~/tmp/eg_logs

to run the examples and log the result, or

run_log_examples.py nipy/examples/some_example.py

to run a single example.
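
Files can be skipped with the repeatable --excludex option, which takes a
regular expression (pattern here illustrative):

run_log_examples.py nipy/examples --log-path=~/tmp/eg_logs --excludex=fiac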
""" import os import re import sys from argparse import ArgumentParser, RawDescriptionHelpFormatter from os.path import abspath, dirname, expanduser, isfile from os.path import join as pjoin from os.path import sep as psep from subprocess import PIPE, Popen PYTHON=sys.executable NEED_SHELL = True class ProcLogger: def __init__(self, log_path, working_path): self.log_path = log_path self.working_path = working_path self._names = [] def cmd_str_maker(self, cmd, args): return " ".join([cmd] + list(args)) def __call__(self, cmd_name, cmd, args=(), cwd=None): # Mqke log files if cmd_name in self._names: raise ValueError(f'Command name {cmd_name} not unique') self._names.append(cmd_name) if cwd is None: cwd = self.working_path cmd_out_path = pjoin(self.log_path, cmd_name) stdout_log = open(cmd_out_path + '.stdout', "w") stderr_log = open(cmd_out_path + '.stderr', "w") try: # Start subprocess cmd_str = self.cmd_str_maker(cmd, args) proc = Popen(cmd_str, cwd = cwd, stdout = stdout_log, stderr = stderr_log, shell = NEED_SHELL) # Execute retcode = proc.wait() finally: if proc.poll() is None: # In case we get killed proc.terminate() stdout_log.close() stderr_log.close() return retcode def run_pipes(self, cmd, args=(), cwd=None): if cwd is None: cwd = self.working_path try: # Start subprocess cmd_str = self.cmd_str_maker(cmd, args) proc = Popen(cmd_str, cwd = cwd, stdout = PIPE, stderr = PIPE, shell = NEED_SHELL) # Execute stdout, stderr = proc.communicate() finally: if proc.poll() is None: # In case we get killed proc.terminate() return stdout.decode(), stderr.decode(), proc.returncode class PyProcLogger(ProcLogger): def cmd_str_maker(self, cmd, args): """ Execute python script `cmd` Reject any `args` because we're using ``exec`` to execute the script. Prepend some matplotlib setup to suppress figures """ if len(args) != 0: raise ValueError(f"Cannot use args with {self.__class__}") return(f"""{PYTHON} -c "import matplotlib as mpl; mpl.use('agg'); """ f"""exec(open('{cmd}', 'rt').read())" """) def _record(result, fname, fileobj): print(result) fileobj.write(f'{fname}: {result}\n') def main(): parser = ArgumentParser(description=DESCRIP, epilog=EPILOG, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('examples_path', type=str, help='filename of example or directory containing ' 'examples to run') parser.add_argument('--log-path', type=str, default='', help='path for output logs (default is cwd)') parser.add_argument('--excludex', type=str, action='append', default=[], help='regex for files to exclude (add more than one ' '--excludex option for more than one regex filter') args = parser.parse_args() # Proc runner eg_path = abspath(expanduser(args.examples_path)) if args.log_path == '': log_path = abspath(os.getcwd()) else: log_path = abspath(expanduser(args.log_path)) excludexes = [re.compile(s) for s in args.excludex] if isfile(eg_path): # example was a file proc_logger = PyProcLogger(log_path=log_path, working_path=dirname(eg_path)) print("Running " + eg_path) stdout, stderr, code = proc_logger.run_pipes(eg_path) print('==== Stdout ====') print(stdout) print('==== Stderr ====') print(stderr) sys.exit(code) # Multi-run with logging to file proc_logger = PyProcLogger(log_path=log_path, working_path=eg_path) fails = 0 with open(pjoin(log_path, 'summary.txt'), "w") as f: for dirpath, dirnames, filenames in os.walk(eg_path): for fname in filenames: full_fname = pjoin(dirpath, fname) if fname.endswith(".py"): print(fname, end=': ') sys.stdout.flush() for excludex in excludexes: if 
excludex.search(fname): _record('SKIP', fname, f) break else: # run test cmd_name = full_fname.replace(eg_path + psep, '') cmd_name = cmd_name.replace(psep, '-') code = proc_logger(cmd_name, full_fname, cwd=dirpath) if code == 0: _record('OK', fname, f) else: fails += 1 _record('FAIL', fname, f) sys.exit(min(255, fails)) if __name__ == '__main__': main() nipy-0.6.1/tools/show_version_info.sh000077500000000000000000000026301470056100100177270ustar00rootroot00000000000000#!/bin/bash # Check returned versions for: # # * Install from git zip archive # * Standard install # * Local editable install. PYTHON=${PYTHON:-python3} VER_CHK_TMP=${PWD}/version_check_tmp VER_FILE=${VER_CHK_TMP}/version_checks.txt VENV_DIR=${VER_CHK_TMP}/venv ZIP_FNAME=${VER_CHK_TMP}/project.zip ZIP_DIR=${VER_CHK_TMP}/unpacked_zip function mk_newdir { rm -rf $1 mkdir $1 } function extra_install { # Extra steps prior to pip install. # Will differ by package. pip install numpy } function install_show { local pkg_name=$1 [ -z "$pkg_name" ] && (echo "Need package name" && exit 1) shift local inst_type=$1 [ -z "$inst_type" ] && (echo "Need installation type" && exit 2) shift mk_newdir ${VENV_DIR} ${PYTHON} -m virtualenv ${VENV_DIR} ( . ${VENV_DIR}/bin/activate && \ extra_install && \ pip install $@ && \ local pkg_ver=$(python -c "import $pkg_name; print(${pkg_name}.__version__)") && \ echo "${pkg_name} - ${inst_type}: ${pkg_ver}" >> ${VER_FILE} && \ deactivate ) } mk_newdir ${VER_CHK_TMP} cat << EOF > ${VER_FILE} ######## Versions ######## EOF # Git zip archive git archive --format zip -o $ZIP_FNAME HEAD mk_newdir $ZIP_DIR (cd $ZIP_DIR && unzip $ZIP_FNAME) install_show nipy zip ${ZIP_DIR} # Standard install install_show nipy install . # Local editable install install_show nipy editable -e . cat ${VER_FILE} nipy-0.6.1/tools/touch_cython_cs.py000077500000000000000000000023601470056100100174000ustar00rootroot00000000000000#!/usr/bin/env python3 """ Refresh modification times on Cython generated C files This is sometimes necessary on windows when the git checkout appears to sometimes checkout the C files with modification times earlier than the pyx files, triggering an attempt to rebuild the C files with Cython when running a build. 
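
A minimal usage sketch (the directory argument here is illustrative; with
no argument the script walks the current working directory):

    python3 tools/touch_cython_cs.py nipy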
""" import optparse import os import sys from os.path import isfile, splitext from os.path import join as pjoin def touch(fname, times=None, ns=None, dir_fd=None): with os.open(fname, os.O_APPEND, dir_fd=dir_fd) as f: os.utime(f.fileno() if os.utime in os.supports_fd else fname, times=times, ns=ns, dir_fd=dir_fd) def main(): parser = optparse.OptionParser(usage='%prog []') (opts, args) = parser.parse_args() if len(args) > 1: parser.print_help() sys.exit(-1) elif len(args) == 1: root_dir = args[0] else: root_dir = os.getcwd() for dirpath, dirnames, filenames in os.walk(root_dir): for fn in filenames: if fn.endswith('.pyx'): froot, ext = splitext(fn) cfile = pjoin(dirpath, froot + '.c') if isfile(cfile): touch(cfile) if __name__ == '__main__': main() nipy-0.6.1/tools/travis_tools.sh000066400000000000000000000012071470056100100167130ustar00rootroot00000000000000# Tools for working with travis-ci export WHEELHOST="travis-wheels.scikit-image.org" export WHEELHOUSE="http://${WHEELHOST}/" retry () { # https://gist.github.com/fungusakafungus/1026804 local retry_max=5 local count=$retry_max while [ $count -gt 0 ]; do "$@" && break count=$(($count - 1)) sleep 1 done [ $count -eq 0 ] && { echo "Retry failed [$retry_max]: $@" >&2 return 1 } return 0 } wheelhouse_pip_install() { # Install pip requirements via travis wheelhouse retry pip install --timeout=60 --no-index --trusted-host $WHEELHOST --find-links $WHEELHOUSE $@ } nipy-0.6.1/tools/upload-gh-pages.sh000077500000000000000000000015671470056100100171540ustar00rootroot00000000000000#!/bin/bash # Upload website to gh-pages USAGE="$0 []" HTML_DIR=$1 if [ -z "$HTML_DIR" ]; then echo $USAGE exit 1 fi if [ ! -e "$HTML_DIR/index.html" ]; then echo "$HTML_DIR does not contain an index.html" exit 1 fi if [ -d "$HTML_DIR/.git" ]; then echo "$HTML_DIR already contains a .git directory" exit 1 fi PROJECT=$2 if [ -z "$PROJECT" ]; then echo $USAGE exit 1 fi ORGANIZATION=$3 if [ -z "$ORGANIZATION" ]; then ORGANIZATION=nipy fi upstream_repo="https://github.com/$ORGANIZATION/$PROJECT" cd $HTML_DIR git init git checkout -b gh-pages git add * # A nojekyll file is needed to tell github that this is *not* a jekyll site: touch .nojekyll git add .nojekyll git commit -a -m "Documentation build - no history" git remote add origin $upstream_repo git push origin gh-pages --force rm -rf .git # Yes